path: root/libavcodec/h264.c
author    Anton Khirnov <anton@khirnov.net>  2012-11-21 21:34:46 +0100
committer Anton Khirnov <anton@khirnov.net>  2013-03-08 07:38:30 +0100
commit    759001c534287a96dc96d1e274665feb7059145d (patch)
tree      6ace9560c20aa30db92067c5b45d7bd86e458d10 /libavcodec/h264.c
parent    6e7b50b4270116ded8b874d76cb7c5b1a0341827 (diff)
download  ffmpeg-759001c534287a96dc96d1e274665feb7059145d.tar.gz
lavc decoders: work with refcounted frames.
Diffstat (limited to 'libavcodec/h264.c')
-rw-r--r--  libavcodec/h264.c | 418
1 file changed, 247 insertions(+), 171 deletions(-)
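
The change this commit makes throughout h264.c is to replace manually freed and shallow-copied Picture state with AVBufferRef-based reference counting. Below is a minimal standalone sketch of that ownership model, using only public libavutil calls (compile with e.g. `gcc demo.c -lavutil`); it is an illustration of the buffer API, not code from this commit:

#include <stdio.h>
#include <libavutil/buffer.h>

int main(void)
{
    AVBufferRef *a = av_buffer_alloc(1024);   /* refcount == 1 */
    AVBufferRef *b = NULL;

    if (!a)
        return 1;
    b = av_buffer_ref(a);                     /* refcount == 2, no data copy */
    if (!b) {
        av_buffer_unref(&a);
        return 1;
    }

    printf("shared data: %d  writable: %d\n",
           a->data == b->data,                /* 1: both refs see one buffer */
           av_buffer_is_writable(a));         /* 0: shared refs are read-only */

    av_buffer_unref(&a);                      /* refcount == 1, b still valid */
    av_buffer_unref(&b);                      /* refcount == 0, data is freed */
    return 0;
}
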
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 0af5cea0d1..498f611a97 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -113,7 +113,7 @@ static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
* practice then correct remapping should be added. */
if (ref >= h->ref_count[0])
ref = 0;
- fill_rectangle(&h->cur_pic.f.ref_index[0][4 * h->mb_xy],
+ fill_rectangle(&h->cur_pic.ref_index[0][4 * h->mb_xy],
2, 2, 2, ref, 1);
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
@@ -166,28 +166,25 @@ void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
}
}
-static void free_frame_buffer(H264Context *h, Picture *pic)
-{
- ff_thread_release_buffer(h->avctx, &pic->f);
- av_freep(&pic->f.hwaccel_picture_private);
-}
-
-static void free_picture(H264Context *h, Picture *pic)
+static void unref_picture(H264Context *h, Picture *pic)
{
+ int off = offsetof(Picture, tf) + sizeof(pic->tf);
int i;
- if (pic->f.data[0])
- free_frame_buffer(h, pic);
+ if (!pic->f.data[0])
+ return;
+
+ ff_thread_release_buffer(h->avctx, &pic->tf);
+ av_buffer_unref(&pic->hwaccel_priv_buf);
- av_freep(&pic->qscale_table_base);
- pic->f.qscale_table = NULL;
- av_freep(&pic->mb_type_base);
- pic->f.mb_type = NULL;
+ av_buffer_unref(&pic->qscale_table_buf);
+ av_buffer_unref(&pic->mb_type_buf);
for (i = 0; i < 2; i++) {
- av_freep(&pic->motion_val_base[i]);
- av_freep(&pic->f.ref_index[i]);
- pic->f.motion_val[i] = NULL;
+ av_buffer_unref(&pic->motion_val_buf[i]);
+ av_buffer_unref(&pic->ref_index_buf[i]);
}
+
+ memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
static void release_unused_pictures(H264Context *h, int remove_current)
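
The new unref_picture() above releases every buffer reference and then zeroes only the members that come after `tf`, so the embedded frame headers survive the reset. A standalone sketch of that offsetof() idiom follows, with a hypothetical stand-in struct rather than the real Picture layout:

#include <stddef.h>
#include <string.h>
#include <assert.h>

typedef struct Pic {
    int frame;      /* survives the reset (stands in for Picture.f / .tf) */
    int tf;         /* last member that is kept */
    int poc;        /* everything from here on gets cleared */
    int reference;
} Pic;

int main(void)
{
    Pic p = { .frame = 1, .tf = 2, .poc = 42, .reference = 3 };
    size_t off = offsetof(Pic, tf) + sizeof(p.tf);

    memset((char *)&p + off, 0, sizeof(p) - off);    /* partial clear */

    assert(p.frame == 1 && p.tf == 2);               /* preserved */
    assert(p.poc == 0 && p.reference == 0);          /* wiped */
    return 0;
}

This only works because unref_picture() has already dropped every AVBufferRef in the cleared region; zeroing a still-owned pointer would leak it.
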
@@ -195,15 +192,74 @@ static void release_unused_pictures(H264Context *h, int remove_current)
int i;
/* release non reference frames */
- for (i = 0; i < h->picture_count; i++) {
- if (h->DPB[i].f.data[0] && !h->DPB[i].f.reference &&
- (!h->DPB[i].owner2 || h->DPB[i].owner2 == h) &&
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ if (h->DPB[i].f.data[0] && !h->DPB[i].reference &&
(remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
- free_frame_buffer(h, &h->DPB[i]);
+ unref_picture(h, &h->DPB[i]);
}
}
}
+static int ref_picture(H264Context *h, Picture *dst, Picture *src)
+{
+ int ret, i;
+
+ av_assert0(!dst->f.buf[0]);
+ av_assert0(src->f.buf[0]);
+
+ src->tf.f = &src->f;
+ dst->tf.f = &dst->f;
+ ret = ff_thread_ref_frame(&dst->tf, &src->tf);
+ if (ret < 0)
+ goto fail;
+
+
+ dst->qscale_table_buf = av_buffer_ref(src->qscale_table_buf);
+ dst->mb_type_buf = av_buffer_ref(src->mb_type_buf);
+ if (!dst->qscale_table_buf || !dst->mb_type_buf)
+ goto fail;
+ dst->qscale_table = src->qscale_table;
+ dst->mb_type = src->mb_type;
+
+ for (i = 0; i < 2; i ++) {
+ dst->motion_val_buf[i] = av_buffer_ref(src->motion_val_buf[i]);
+ dst->ref_index_buf[i] = av_buffer_ref(src->ref_index_buf[i]);
+ if (!dst->motion_val_buf[i] || !dst->ref_index_buf[i])
+ goto fail;
+ dst->motion_val[i] = src->motion_val[i];
+ dst->ref_index[i] = src->ref_index[i];
+ }
+
+ if (src->hwaccel_picture_private) {
+ dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
+ if (!dst->hwaccel_priv_buf)
+ goto fail;
+ dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
+ }
+
+ for (i = 0; i < 2; i++)
+ dst->field_poc[i] = src->field_poc[i];
+
+ memcpy(dst->ref_poc, src->ref_poc, sizeof(src->ref_poc));
+ memcpy(dst->ref_count, src->ref_count, sizeof(src->ref_count));
+
+ dst->poc = src->poc;
+ dst->frame_num = src->frame_num;
+ dst->mmco_reset = src->mmco_reset;
+ dst->pic_id = src->pic_id;
+ dst->long_ref = src->long_ref;
+ dst->mbaff = src->mbaff;
+ dst->field_picture = src->field_picture;
+ dst->needs_realloc = src->needs_realloc;
+ dst->reference = src->reference;
+
+ return 0;
+fail:
+ unref_picture(h, dst);
+ return ret;
+}
+
+
static int alloc_scratch_buffers(H264Context *h, int linesize)
{
int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
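
The ref_picture() added above takes new references to all of a source picture's buffers and, on any failure, rolls the destination back through the same unref path. A hedged sketch of that all-or-nothing pattern, with illustrative field names rather than the real Picture members:

#include <errno.h>
#include <libavutil/buffer.h>
#include <libavutil/error.h>

typedef struct Meta {
    AVBufferRef *qscale;    /* illustrative names, not the real Picture */
    AVBufferRef *mb_type;
} Meta;

static void meta_unref(Meta *m)
{
    av_buffer_unref(&m->qscale);    /* av_buffer_unref() accepts NULL refs */
    av_buffer_unref(&m->mb_type);
}

static int meta_ref(Meta *dst, Meta *src)
{
    dst->qscale  = av_buffer_ref(src->qscale);
    dst->mb_type = av_buffer_ref(src->mb_type);
    if (!dst->qscale || !dst->mb_type) {
        meta_unref(dst);            /* roll back whatever succeeded */
        return AVERROR(ENOMEM);
    }
    return 0;
}
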
@@ -229,60 +285,86 @@ static int alloc_scratch_buffers(H264Context *h, int linesize)
return 0;
}
-static int alloc_picture(H264Context *h, Picture *pic)
+static int init_table_pools(H264Context *h)
{
const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
const int mb_array_size = h->mb_stride * h->mb_height;
const int b4_stride = h->mb_width * 4 + 1;
const int b4_array_size = b4_stride * h->mb_height * 4;
+
+ h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
+ av_buffer_allocz);
+ h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
+ sizeof(uint32_t), av_buffer_allocz);
+ h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
+ sizeof(int16_t), av_buffer_allocz);
+ h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
+
+ if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
+ !h->ref_index_pool) {
+ av_buffer_pool_uninit(&h->qscale_table_pool);
+ av_buffer_pool_uninit(&h->mb_type_pool);
+ av_buffer_pool_uninit(&h->motion_val_pool);
+ av_buffer_pool_uninit(&h->ref_index_pool);
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static int alloc_picture(H264Context *h, Picture *pic)
+{
int i, ret = 0;
av_assert0(!pic->f.data[0]);
if (h->avctx->hwaccel) {
const AVHWAccel *hwaccel = h->avctx->hwaccel;
- av_assert0(!pic->f.hwaccel_picture_private);
+ av_assert0(!pic->hwaccel_picture_private);
if (hwaccel->priv_data_size) {
- pic->f.hwaccel_picture_private = av_mallocz(hwaccel->priv_data_size);
- if (!pic->f.hwaccel_picture_private)
+ pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->priv_data_size);
+ if (!pic->hwaccel_priv_buf)
return AVERROR(ENOMEM);
+ pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
}
}
- ret = ff_thread_get_buffer(h->avctx, &pic->f);
+ pic->tf.f = &pic->f;
+ ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
+ AV_GET_BUFFER_FLAG_REF : 0);
if (ret < 0)
goto fail;
h->linesize = pic->f.linesize[0];
h->uvlinesize = pic->f.linesize[1];
- if (pic->f.qscale_table == NULL) {
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->qscale_table_base,
- (big_mb_num + h->mb_stride) * sizeof(uint8_t),
- fail)
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->mb_type_base,
- (big_mb_num + h->mb_stride) * sizeof(uint32_t),
- fail)
- pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
- pic->f.qscale_table = pic->qscale_table_base + 2 * h->mb_stride + 1;
+ if (!h->qscale_table_pool) {
+ ret = init_table_pools(h);
+ if (ret < 0)
+ goto fail;
+ }
- for (i = 0; i < 2; i++) {
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->motion_val_base[i],
- 2 * (b4_array_size + 4) * sizeof(int16_t),
- fail)
- pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->f.ref_index[i],
- 4 * mb_array_size * sizeof(uint8_t), fail)
- }
- pic->f.motion_subsample_log2 = 2;
+ pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
+ pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
+ if (!pic->qscale_table_buf || !pic->mb_type_buf)
+ goto fail;
- pic->f.qstride = h->mb_stride;
- }
+ pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
+ pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
+
+ for (i = 0; i < 2; i++) {
+ pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
+ pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
+ if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
+ goto fail;
- pic->owner2 = h;
+ pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
+ pic->ref_index[i] = pic->ref_index_buf[i]->data;
+ }
+ pic->f.motion_subsample_log2 = 2;
return 0;
fail:
- free_frame_buffer(h, pic);
+ unref_picture(h, pic);
return (ret < 0) ? ret : AVERROR(ENOMEM);
}
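
init_table_pools() above replaces per-picture av_mallocz() tables with AVBufferPool, so table buffers are recycled across frames instead of reallocated. A small standalone sketch of the pool lifecycle, using the public av_buffer_pool_* API:

#include <libavutil/buffer.h>

int main(void)
{
    /* Pool of zero-initialized 4096-byte buffers; av_buffer_allocz is the
     * same allocator init_table_pools() passes in. */
    AVBufferPool *pool = av_buffer_pool_init(4096, av_buffer_allocz);
    AVBufferRef *buf;

    if (!pool)
        return 1;

    buf = av_buffer_pool_get(pool);   /* allocates on first use */
    if (!buf) {
        av_buffer_pool_uninit(&pool);
        return 1;
    }

    buf->data[0] = 0xff;              /* use it as per-frame table storage */
    av_buffer_unref(&buf);            /* returns the buffer to the pool... */

    buf = av_buffer_pool_get(pool);   /* ...so this likely reuses it */
    av_buffer_unref(&buf);

    av_buffer_pool_uninit(&pool);     /* freed once all refs are gone */
    return 0;
}
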
@@ -290,9 +372,8 @@ static inline int pic_is_unused(H264Context *h, Picture *pic)
{
if (pic->f.data[0] == NULL)
return 1;
- if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
- if (!pic->owner2 || pic->owner2 == h)
- return 1;
+ if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
+ return 1;
return 0;
}
@@ -300,17 +381,16 @@ static int find_unused_picture(H264Context *h)
{
int i;
- for (i = h->picture_range_start; i < h->picture_range_end; i++) {
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (pic_is_unused(h, &h->DPB[i]))
break;
}
- if (i == h->picture_range_end)
+ if (i == MAX_PICTURE_COUNT)
return AVERROR_INVALIDDATA;
if (h->DPB[i].needs_realloc) {
h->DPB[i].needs_realloc = 0;
- free_picture(h, &h->DPB[i]);
- avcodec_get_frame_defaults(&h->DPB[i].f);
+ unref_picture(h, &h->DPB[i]);
}
return i;
@@ -561,8 +641,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
// Error resilience puts the current picture in the ref list.
// Don't try to wait on these as it will cause a deadlock.
// Fields can wait on each other, though.
- if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque ||
- (ref->f.reference & 3) != h->picture_structure) {
+ if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
+ (ref->reference & 3) != h->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
if (refs[0][ref_n] < 0)
nrefs[0] += 1;
@@ -574,8 +654,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
int ref_n = h->ref_cache[1][scan8[n]];
Picture *ref = &h->ref_list[1][ref_n];
- if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque ||
- (ref->f.reference & 3) != h->picture_structure) {
+ if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
+ (ref->reference & 3) != h->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
if (refs[1][ref_n] < 0)
nrefs[1] += 1;
@@ -592,7 +672,7 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
static void await_references(H264Context *h)
{
const int mb_xy = h->mb_xy;
- const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+ const int mb_type = h->cur_pic.mb_type[mb_xy];
int refs[2][48];
int nrefs[2] = { 0 };
int ref, list;
@@ -664,7 +744,7 @@ static void await_references(H264Context *h)
int row = refs[list][ref];
if (row >= 0) {
Picture *ref_pic = &h->ref_list[list][ref];
- int ref_field = ref_pic->f.reference - 1;
+ int ref_field = ref_pic->reference - 1;
int ref_field_picture = ref_pic->field_picture;
int pic_height = 16 * h->mb_height >> ref_field_picture;
@@ -672,24 +752,24 @@ static void await_references(H264Context *h)
nrefs[list]--;
if (!FIELD_PICTURE && ref_field_picture) { // frame referencing two fields
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN((row >> 1) - !(row & 1),
pic_height - 1),
1);
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN((row >> 1), pic_height - 1),
0);
} else if (FIELD_PICTURE && !ref_field_picture) { // field referencing one field of a frame
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN(row * 2 + ref_field,
pic_height - 1),
0);
} else if (FIELD_PICTURE) {
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN(row, pic_height - 1),
ref_field);
} else {
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN(row, pic_height - 1),
0);
}
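
The progress calls above now take a ThreadFrame instead of an AVFrame. Conceptually, report/await is a monotonic row counter guarded by a condition variable; the following sketch shows that protocol in plain pthreads, not the actual lavc internals:

#include <pthread.h>

typedef struct Progress {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             row;               /* last fully decoded row, -1 = none */
} Progress;

static void progress_init(Progress *p)
{
    pthread_mutex_init(&p->lock, NULL);
    pthread_cond_init(&p->cond, NULL);
    p->row = -1;
}

static void report_progress(Progress *p, int row)   /* decoder side */
{
    pthread_mutex_lock(&p->lock);
    if (row > p->row) {                /* progress only moves forward */
        p->row = row;
        pthread_cond_broadcast(&p->cond);
    }
    pthread_mutex_unlock(&p->lock);
}

static void await_progress(Progress *p, int row)    /* referencing side */
{
    pthread_mutex_lock(&p->lock);
    while (p->row < row)               /* loop guards spurious wakeups */
        pthread_cond_wait(&p->cond, &p->lock);
    pthread_mutex_unlock(&p->lock);
}
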
@@ -781,7 +861,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
ysh = 3 - (chroma_idc == 2 /* yuv422 */);
if (chroma_idc == 1 /* yuv420 */ && MB_FIELD) {
// chroma offset when predicting from a field of opposite parity
- my += 2 * ((h->mb_y & 1) - (pic->f.reference - 1));
+ my += 2 * ((h->mb_y & 1) - (pic->reference - 1));
emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
}
@@ -1009,13 +1089,17 @@ static void free_tables(H264Context *h, int free_rbsp)
av_freep(&h->mb2b_xy);
av_freep(&h->mb2br_xy);
- if (free_rbsp) {
- for (i = 0; i < h->picture_count && !h->avctx->internal->is_copy; i++)
- free_picture(h, &h->DPB[i]);
+ av_buffer_pool_uninit(&h->qscale_table_pool);
+ av_buffer_pool_uninit(&h->mb_type_pool);
+ av_buffer_pool_uninit(&h->motion_val_pool);
+ av_buffer_pool_uninit(&h->ref_index_pool);
+
+ if (free_rbsp && h->DPB) {
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
+ unref_picture(h, &h->DPB[i]);
av_freep(&h->DPB);
- h->picture_count = 0;
} else if (h->DPB) {
- for (i = 0; i < h->picture_count; i++)
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
h->DPB[i].needs_realloc = 1;
}
@@ -1164,11 +1248,10 @@ int ff_h264_alloc_tables(H264Context *h)
init_dequant_tables(h);
if (!h->DPB) {
- h->picture_count = MAX_PICTURE_COUNT * FFMAX(1, h->avctx->thread_count);
- h->DPB = av_mallocz_array(h->picture_count, sizeof(*h->DPB));
+ h->DPB = av_mallocz_array(MAX_PICTURE_COUNT, sizeof(*h->DPB));
if (!h->DPB)
return AVERROR(ENOMEM);
- for (i = 0; i < h->picture_count; i++)
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
avcodec_get_frame_defaults(&h->DPB[i].f);
avcodec_get_frame_defaults(&h->cur_pic.f);
}
@@ -1367,8 +1450,6 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
common_init(h);
h->picture_structure = PICT_FRAME;
- h->picture_range_start = 0;
- h->picture_range_end = MAX_PICTURE_COUNT;
h->slice_context_count = 1;
h->workaround_bugs = avctx->workaround_bugs;
h->flags = avctx->flags;
@@ -1408,6 +1489,8 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
h->low_delay = 0;
}
+ avctx->internal->allocate_progress = 1;
+
return 0;
}
@@ -1415,7 +1498,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
#undef REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \
((pic && pic >= old_ctx->DPB && \
- pic < old_ctx->DPB + old_ctx->picture_count) ? \
+ pic < old_ctx->DPB + MAX_PICTURE_COUNT) ? \
&new_ctx->DPB[pic - old_ctx->DPB] : NULL)
static void copy_picture_range(Picture **to, Picture **from, int count,
@@ -1427,7 +1510,7 @@ static void copy_picture_range(Picture **to, Picture **from, int count,
for (i = 0; i < count; i++) {
assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
IN_RANGE(from[i], old_base->DPB,
- sizeof(Picture) * old_base->picture_count) ||
+ sizeof(Picture) * MAX_PICTURE_COUNT) ||
!from[i]));
to[i] = REBASE_PICTURE(from[i], new_base, old_base);
}
@@ -1476,7 +1559,7 @@ static int decode_update_thread_context(AVCodecContext *dst,
H264Context *h = dst->priv_data, *h1 = src->priv_data;
int inited = h->context_initialized, err = 0;
int context_reinitialized = 0;
- int i;
+ int i, ret;
if (dst == src || !h1->context_initialized)
return 0;
@@ -1529,12 +1612,16 @@ static int decode_update_thread_context(AVCodecContext *dst,
memset(&h->me, 0, sizeof(h->me));
h->context_initialized = 0;
- h->picture_range_start += MAX_PICTURE_COUNT;
- h->picture_range_end += MAX_PICTURE_COUNT;
+ memset(&h->cur_pic, 0, sizeof(h->cur_pic));
+ avcodec_get_frame_defaults(&h->cur_pic.f);
+ h->cur_pic.tf.f = &h->cur_pic.f;
h->avctx = dst;
h->DPB = NULL;
- h->cur_pic.f.extended_data = h->cur_pic.f.data;
+ h->qscale_table_pool = NULL;
+ h->mb_type_pool = NULL;
+ h->ref_index_pool = NULL;
+ h->motion_val_pool = NULL;
if (ff_h264_alloc_tables(h) < 0) {
av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
@@ -1568,15 +1655,17 @@ static int decode_update_thread_context(AVCodecContext *dst,
h->data_partitioning = h1->data_partitioning;
h->low_delay = h1->low_delay;
- memcpy(h->DPB, h1->DPB, h1->picture_count * sizeof(*h1->DPB));
-
- // reset s->picture[].f.extended_data to s->picture[].f.data
- for (i = 0; i < h->picture_count; i++)
- h->DPB[i].f.extended_data = h->DPB[i].f.data;
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ unref_picture(h, &h->DPB[i]);
+ if (h1->DPB[i].f.data[0] &&
+ (ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
+ return ret;
+ }
h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
- h->cur_pic = h1->cur_pic;
- h->cur_pic.f.extended_data = h->cur_pic.f.data;
+ unref_picture(h, &h->cur_pic);
+ if ((ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0)
+ return ret;
h->workaround_bugs = h1->workaround_bugs;
h->low_delay = h1->low_delay;
@@ -1660,7 +1749,7 @@ int ff_h264_frame_start(H264Context *h)
}
pic = &h->DPB[i];
- pic->f.reference = h->droppable ? 0 : h->picture_structure;
+ pic->reference = h->droppable ? 0 : h->picture_structure;
pic->f.coded_picture_number = h->coded_picture_number++;
pic->field_picture = h->picture_structure != PICT_FRAME;
/*
@@ -1675,8 +1764,9 @@ int ff_h264_frame_start(H264Context *h)
return ret;
h->cur_pic_ptr = pic;
- h->cur_pic = *h->cur_pic_ptr;
- h->cur_pic.f.extended_data = h->cur_pic.f.data;
+ unref_picture(h, &h->cur_pic);
+ if ((ret = ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
+ return ret;
ff_er_frame_start(&h->er);
@@ -1717,7 +1807,7 @@ int ff_h264_frame_start(H264Context *h)
* get released even with set reference, besides SVQ3 and others do not
* mark frames as reference later "naturally". */
if (h->avctx->codec_id != AV_CODEC_ID_SVQ3)
- h->cur_pic_ptr->f.reference = 0;
+ h->cur_pic_ptr->reference = 0;
h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
@@ -1743,7 +1833,6 @@ static void decode_postinit(H264Context *h, int setup_finished)
int i, pics, out_of_order, out_idx;
int invalid = 0, cnt = 0;
- h->cur_pic_ptr->f.qscale_type = FF_QSCALE_TYPE_H264;
h->cur_pic_ptr->f.pict_type = h->pict_type;
if (h->next_output_pic)
@@ -1847,8 +1936,8 @@ static void decode_postinit(H264Context *h, int setup_finished)
assert(pics <= MAX_DELAYED_PIC_COUNT);
h->delayed_pic[pics++] = cur;
- if (cur->f.reference == 0)
- cur->f.reference = DELAYED_PIC_REF;
+ if (cur->reference == 0)
+ cur->reference = DELAYED_PIC_REF;
/* Frame reordering. This code takes pictures from coding order and sorts
* them by their incremental POC value into display order. It supports POC
@@ -1913,10 +2002,9 @@ static void decode_postinit(H264Context *h, int setup_finished)
}
if (pics > h->avctx->has_b_frames) {
- out->f.reference &= ~DELAYED_PIC_REF;
+ out->reference &= ~DELAYED_PIC_REF;
// for frame threading, the owner must be the second field's thread or
// else the first thread can release the picture and reuse it unsafely
- out->owner2 = h;
for (i = out_idx; h->delayed_pic[i]; i++)
h->delayed_pic[i] = h->delayed_pic[i + 1];
}
@@ -2350,7 +2438,7 @@ static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type,
void ff_h264_hl_decode_mb(H264Context *h)
{
const int mb_xy = h->mb_xy;
- const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+ const int mb_type = h->cur_pic.mb_type[mb_xy];
int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || h->qscale == 0;
if (CHROMA444) {
@@ -2516,7 +2604,7 @@ static void flush_change(H264Context *h)
h->prev_interlaced_frame = 1;
idr(h);
if (h->cur_pic_ptr)
- h->cur_pic_ptr->f.reference = 0;
+ h->cur_pic_ptr->reference = 0;
h->first_field = 0;
memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
@@ -2533,17 +2621,16 @@ static void flush_dpb(AVCodecContext *avctx)
for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
if (h->delayed_pic[i])
- h->delayed_pic[i]->f.reference = 0;
+ h->delayed_pic[i]->reference = 0;
h->delayed_pic[i] = NULL;
}
flush_change(h);
- for (i = 0; i < h->picture_count; i++) {
- if (h->DPB[i].f.data[0])
- free_frame_buffer(h, &h->DPB[i]);
- }
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
+ unref_picture(h, &h->DPB[i]);
h->cur_pic_ptr = NULL;
+ unref_picture(h, &h->cur_pic);
h->mb_x = h->mb_y = 0;
@@ -2676,7 +2763,7 @@ static int field_end(H264Context *h, int in_setup)
h->mb_y = 0;
if (!in_setup && !h->droppable)
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
if (CONFIG_H264_VDPAU_DECODER &&
@@ -3019,9 +3106,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h0->current_slice = 0;
if (!h0->first_field) {
- if (h->cur_pic_ptr && !h->droppable &&
- h->cur_pic_ptr->owner2 == h) {
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+ if (h->cur_pic_ptr && !h->droppable) {
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
}
h->cur_pic_ptr = NULL;
@@ -3240,20 +3326,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
if (h0->first_field) {
assert(h0->cur_pic_ptr);
assert(h0->cur_pic_ptr->f.data[0]);
- assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
-
- /* Mark old field/frame as completed */
- if (!last_pic_droppable && h0->cur_pic_ptr->owner2 == h0) {
- ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
- last_pic_structure == PICT_BOTTOM_FIELD);
- }
+ assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
/* figure out if we have a complementary field pair */
if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
/* Previous field is unmatched. Don't display it, but let it
* remain for reference if marked as such. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
- ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
+ ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}
} else {
@@ -3263,7 +3343,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
* pair. Throw away previous field except for reference
* purposes. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
- ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
+ ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}
} else {
@@ -3286,14 +3366,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h->droppable = last_pic_droppable;
return AVERROR_PATCHWELCOME;
}
-
- /* Take ownership of this buffer. Note that if another thread owned
- * the first field of this buffer, we're not operating on that pointer,
- * so the original thread is still responsible for reporting progress
- * on that first field (or if that was us, we just did that above).
- * By taking ownership, we assign responsibility to ourselves to
- * report progress on the second field. */
- h0->cur_pic_ptr->owner2 = h0;
}
}
}
@@ -3308,8 +3380,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h->prev_frame_num++;
h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
h->cur_pic_ptr->frame_num = h->prev_frame_num;
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 0);
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 1);
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 &&
h->avctx->err_recognition & AV_EF_EXPLODE)
return ret;
@@ -3339,7 +3411,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
if (h0->first_field) {
assert(h0->cur_pic_ptr);
assert(h0->cur_pic_ptr->f.data[0]);
- assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
+ assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
/* figure out if we have a complementary field pair */
if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
@@ -3606,16 +3678,16 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j];
for (i = 0; i < 16; i++) {
id_list[i] = 60;
- if (h->ref_list[j][i].f.data[0]) {
+ if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) {
int k;
- uint8_t *base = h->ref_list[j][i].f.base[0];
+ AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer;
for (k = 0; k < h->short_ref_count; k++)
- if (h->short_ref[k]->f.base[0] == base) {
+ if (h->short_ref[k]->f.buf[0]->buffer == buf) {
id_list[i] = k;
break;
}
for (k = 0; k < h->long_ref_count; k++)
- if (h->long_ref[k] && h->long_ref[k]->f.base[0] == base) {
+ if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) {
id_list[i] = h->short_ref_count + k;
break;
}
@@ -3626,12 +3698,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
ref2frm[1] = -1;
for (i = 0; i < 16; i++)
ref2frm[i + 2] = 4 * id_list[i] +
- (h->ref_list[j][i].f.reference & 3);
+ (h->ref_list[j][i].reference & 3);
ref2frm[18 + 0] =
ref2frm[18 + 1] = -1;
for (i = 16; i < 48; i++)
ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
- (h->ref_list[j][i].f.reference & 3);
+ (h->ref_list[j][i].reference & 3);
}
if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
@@ -3691,11 +3763,11 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
const int b8_xy = 4 * top_xy + 2;
int (*ref2frm)[64] = h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
- AV_COPY128(mv_dst - 1 * 8, h->cur_pic.f.motion_val[list][b_xy + 0]);
+ AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
ref_cache[0 - 1 * 8] =
- ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 0]];
+ ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 0]];
ref_cache[2 - 1 * 8] =
- ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 1]];
+ ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 1]];
} else {
AV_ZERO128(mv_dst - 1 * 8);
AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
@@ -3706,14 +3778,14 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
const int b8_xy = 4 * left_xy[LTOP] + 1;
int (*ref2frm)[64] = h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
- AV_COPY32(mv_dst - 1 + 0, h->cur_pic.f.motion_val[list][b_xy + b_stride * 0]);
- AV_COPY32(mv_dst - 1 + 8, h->cur_pic.f.motion_val[list][b_xy + b_stride * 1]);
- AV_COPY32(mv_dst - 1 + 16, h->cur_pic.f.motion_val[list][b_xy + b_stride * 2]);
- AV_COPY32(mv_dst - 1 + 24, h->cur_pic.f.motion_val[list][b_xy + b_stride * 3]);
+ AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
+ AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
+ AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
+ AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
ref_cache[-1 + 0] =
- ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 0]];
+ ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
ref_cache[-1 + 16] =
- ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 1]];
+ ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
} else {
AV_ZERO32(mv_dst - 1 + 0);
AV_ZERO32(mv_dst - 1 + 8);
@@ -3737,7 +3809,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
}
{
- int8_t *ref = &h->cur_pic.f.ref_index[list][4 * mb_xy];
+ int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
int (*ref2frm)[64] = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
@@ -3748,7 +3820,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
}
{
- int16_t(*mv_src)[2] = &h->cur_pic.f.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
+ int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
@@ -3775,7 +3847,7 @@ static int fill_filter_caches(H264Context *h, int mb_type)
left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
if (FRAME_MBAFF) {
- const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.f.mb_type[mb_xy - 1]);
+ const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
if (h->mb_y & 1) {
if (left_mb_field_flag != curr_mb_field_flag)
@@ -3783,7 +3855,7 @@ static int fill_filter_caches(H264Context *h, int mb_type)
} else {
if (curr_mb_field_flag)
top_xy += h->mb_stride &
- (((h->cur_pic.f.mb_type[top_xy] >> 7) & 1) - 1);
+ (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
if (left_mb_field_flag != curr_mb_field_flag)
left_xy[LBOT] += h->mb_stride;
}
@@ -3797,25 +3869,25 @@ static int fill_filter_caches(H264Context *h, int mb_type)
* This is a conservative estimate: could also check beta_offset
* and more accurate chroma_qp. */
int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
- int qp = h->cur_pic.f.qscale_table[mb_xy];
+ int qp = h->cur_pic.qscale_table[mb_xy];
if (qp <= qp_thresh &&
(left_xy[LTOP] < 0 ||
- ((qp + h->cur_pic.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
+ ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
(top_xy < 0 ||
- ((qp + h->cur_pic.f.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
+ ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
if (!FRAME_MBAFF)
return 1;
if ((left_xy[LTOP] < 0 ||
- ((qp + h->cur_pic.f.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
+ ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
(top_xy < h->mb_stride ||
- ((qp + h->cur_pic.f.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
+ ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
return 1;
}
}
- top_type = h->cur_pic.f.mb_type[top_xy];
- left_type[LTOP] = h->cur_pic.f.mb_type[left_xy[LTOP]];
- left_type[LBOT] = h->cur_pic.f.mb_type[left_xy[LBOT]];
+ top_type = h->cur_pic.mb_type[top_xy];
+ left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
+ left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
if (h->deblocking_filter == 2) {
if (h->slice_table[top_xy] != h->slice_num)
top_type = 0;
@@ -3920,7 +3992,7 @@ static void loop_filter(H264Context *h, int start_x, int end_x)
int mb_xy, mb_type;
mb_xy = h->mb_xy = mb_x + mb_y * h->mb_stride;
h->slice_num = h->slice_table[mb_xy];
- mb_type = h->cur_pic.f.mb_type[mb_xy];
+ mb_type = h->cur_pic.mb_type[mb_xy];
h->list_count = h->list_counts[mb_xy];
if (FRAME_MBAFF)
@@ -3955,8 +4027,8 @@ static void loop_filter(H264Context *h, int start_x, int end_x)
uvlinesize, 0);
if (fill_filter_caches(h, mb_type))
continue;
- h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mb_xy]);
- h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mb_xy]);
+ h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.qscale_table[mb_xy]);
+ h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.qscale_table[mb_xy]);
if (FRAME_MBAFF) {
ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr,
@@ -3978,9 +4050,9 @@ static void predict_field_decoding_flag(H264Context *h)
{
const int mb_xy = h->mb_x + h->mb_y * h->mb_stride;
int mb_type = (h->slice_table[mb_xy - 1] == h->slice_num) ?
- h->cur_pic.f.mb_type[mb_xy - 1] :
+ h->cur_pic.mb_type[mb_xy - 1] :
(h->slice_table[mb_xy - h->mb_stride] == h->slice_num) ?
- h->cur_pic.f.mb_type[mb_xy - h->mb_stride] : 0;
+ h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
}
@@ -4014,7 +4086,7 @@ static void decode_finish_row(H264Context *h)
if (h->droppable)
return;
- ff_thread_report_progress(&h->cur_pic_ptr->f, top + height - 1,
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
h->picture_structure == PICT_BOTTOM_FIELD);
}
@@ -4513,9 +4585,8 @@ again:
end:
/* clean up */
- if (h->cur_pic_ptr && h->cur_pic_ptr->owner2 == h &&
- !h->droppable) {
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+ if (h->cur_pic_ptr && !h->droppable) {
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
}
@@ -4543,6 +4614,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
H264Context *h = avctx->priv_data;
AVFrame *pict = data;
int buf_index = 0;
+ int ret;
h->flags = avctx->flags;
@@ -4571,8 +4643,9 @@ out:
h->delayed_pic[i] = h->delayed_pic[i + 1];
if (out) {
+ if ((ret = av_frame_ref(pict, &out->f)) < 0)
+ return ret;
*got_frame = 1;
- *pict = out->f;
}
return buf_index;
@@ -4605,8 +4678,9 @@ out:
/* Wait for second field. */
*got_frame = 0;
} else {
+ if ((ret = av_frame_ref(pict, &h->next_output_pic->f)) < 0)
+ return ret;
*got_frame = 1;
- *pict = h->next_output_pic->f;
}
}
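
As the two hunks above show, handing a picture to the caller becomes av_frame_ref() instead of the old shallow struct copy. A hedged sketch of what that hand-off guarantees:

#include <libavutil/frame.h>

/* Instead of the old shallow copy (*pict = out->f), the decoder gives the
 * caller its own reference, so the DPB slot can be reused without
 * invalidating the caller's frame. */
static int return_frame(AVFrame *dst, AVFrame *decoded)
{
    int ret = av_frame_ref(dst, decoded); /* new buffer refs + copied props */
    if (ret < 0)
        return ret;                       /* no reference was created */
    return 0;                             /* caller later av_frame_unref()s */
}
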
@@ -4635,13 +4709,15 @@ static av_cold int h264_decode_end(AVCodecContext *avctx)
ff_h264_free_context(h);
- if (h->DPB && !h->avctx->internal->is_copy) {
- for (i = 0; i < h->picture_count; i++) {
- free_picture(h, &h->DPB[i]);
+ if (h->DPB) {
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ unref_picture(h, &h->DPB[i]);
}
}
av_freep(&h->DPB);
+ unref_picture(h, &h->cur_pic);
+
return 0;
}
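
Finally, on the caller side, frames obtained from a decoder built on this model stay valid until explicitly unreferenced. A hedged usage sketch against the lavc API of this period; it assumes the codec context was opened with avctx->refcounted_frames = 1 (set before avcodec_open2()) and that `frame` was allocated by the caller, e.g. via avcodec_alloc_frame(). Error handling is trimmed to the essentials:

#include <libavcodec/avcodec.h>

static int decode_one(AVCodecContext *avctx, AVPacket *pkt, AVFrame *frame)
{
    int got_frame = 0;
    int ret = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (ret < 0)
        return ret;

    if (got_frame) {
        /* ... consume frame->data[] / frame->linesize[] ... */
        av_frame_unref(frame);  /* drop our reference; the decoder's own
                                 * reference keeps its DPB copy alive */
    }
    return 0;
}
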