author     Anton Khirnov <anton@khirnov.net>   2015-01-28 13:26:24 +0100
committer  Anton Khirnov <anton@khirnov.net>   2015-03-21 11:27:15 +0100
commit     51d8725a6e4c7f989d6f0f38b59cb54a2d72846c (patch)
tree       caef0ac89f1ae68553fdedf03e64f9427a4ac4c5
parent     92e0b7342c0490894cbcea4461380321e0569de2 (diff)
h264: use the main H264Context as the parent for all slice contexts
There is now no need to have per-slice copies of the H264Context.
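As a rough illustration of the ownership model this moves to, here is a minimal sketch with made-up struct and field names (not the real libavcodec definitions): every slice context keeps a pointer back to the one main context, so shared state is read through that pointer instead of being replicated into per-thread copies of the whole decoder context.

    /* Illustrative sketch only -- simplified, invented types, not the real
     * libavcodec structures. */
    typedef struct DecoderContext DecoderContext;

    typedef struct SliceContext {
        DecoderContext *parent;   /* the single main decoder context      */
        int mb_y;                 /* per-slice decoding state stays here  */
        int deblocking_filter;
    } SliceContext;

    struct DecoderContext {
        int nal_unit_type;        /* shared state, parsed once per NAL    */
        int current_slice;
        int nb_slice_ctx;
        SliceContext *slice_ctx;  /* one entry per slice decoding context */
    };

    /* Instead of cloning the whole decoder context for every worker and
     * copying shared fields back and forth, each slice context just points
     * at the one main context. */
    static void attach_slice_contexts(DecoderContext *d)
    {
        for (int i = 0; i < d->nb_slice_ctx; i++)
            d->slice_ctx[i].parent = d;
    }

With a single parent context there is nothing left for a clone step to copy, which is why clone_slice() and the per-NAL hx thread-context bookkeeping can be dropped in the diff below.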
-rw-r--r--   libavcodec/h264.c       |  42
-rw-r--r--   libavcodec/h264.h       |   2
-rw-r--r--   libavcodec/h264_slice.c | 136
3 files changed, 69 insertions, 111 deletions
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 8d213d765b..48ffeabbcd 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -655,7 +655,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
     h->thread_context[0] = h;
 
     for (i = 0; i < h->nb_slice_ctx; i++)
-        h->slice_ctx[i].h264 = h->thread_context[0];
+        h->slice_ctx[i].h264 = h;
 
     h->outputed_poc = h->next_outputed_poc = INT_MIN;
     for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
@@ -1422,7 +1422,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
                             int parse_extradata)
 {
     AVCodecContext *const avctx = h->avctx;
-    H264Context *hx; ///< thread context
     H264SliceContext *sl;
     int buf_index;
     unsigned context_count;
@@ -1468,10 +1467,9 @@
             continue;
         }
 
-        hx = h->thread_context[context_count];
         sl = &h->slice_ctx[context_count];
 
-        ptr = ff_h264_decode_nal(hx, sl, buf + buf_index, &dst_length,
+        ptr = ff_h264_decode_nal(h, sl, buf + buf_index, &dst_length,
                                  &consumed, next_avc - buf_index);
         if (!ptr || dst_length < 0) {
             ret = -1;
@@ -1484,7 +1482,7 @@
         if (h->avctx->debug & FF_DEBUG_STARTCODE)
             av_log(h->avctx, AV_LOG_DEBUG,
                    "NAL %d at %d/%d length %d\n",
-                   hx->nal_unit_type, buf_index, buf_size, dst_length);
+                   h->nal_unit_type, buf_index, buf_size, dst_length);
 
         if (h->is_avc && (nalsize != consumed) && nalsize)
             av_log(h->avctx, AV_LOG_DEBUG,
@@ -1505,17 +1503,17 @@ again:
          * with frame-mt */
         if (parse_extradata && HAVE_THREADS &&
             (h->avctx->active_thread_type & FF_THREAD_FRAME) &&
-            (hx->nal_unit_type != NAL_PPS &&
-             hx->nal_unit_type != NAL_SPS)) {
-            if (hx->nal_unit_type < NAL_AUD ||
-                hx->nal_unit_type > NAL_AUXILIARY_SLICE)
+            (h->nal_unit_type != NAL_PPS &&
+             h->nal_unit_type != NAL_SPS)) {
+            if (h->nal_unit_type < NAL_AUD ||
+                h->nal_unit_type > NAL_AUXILIARY_SLICE)
                 av_log(avctx, AV_LOG_INFO,
                        "Ignoring NAL unit %d during extradata parsing\n",
-                       hx->nal_unit_type);
-            hx->nal_unit_type = NAL_FF_IGNORE;
+                       h->nal_unit_type);
+            h->nal_unit_type = NAL_FF_IGNORE;
         }
         err = 0;
-        switch (hx->nal_unit_type) {
+        switch (h->nal_unit_type) {
         case NAL_IDR_SLICE:
             if (h->nal_unit_type != NAL_IDR_SLICE) {
                 av_log(h->avctx, AV_LOG_ERROR,
@@ -1527,7 +1525,7 @@ again:
         case NAL_SLICE:
             init_get_bits(&sl->gb, ptr, bit_length);
 
-            if ((err = ff_h264_decode_slice_header(hx, sl, h)))
+            if ((err = ff_h264_decode_slice_header(h, sl)))
                 break;
 
             if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
@@ -1536,17 +1534,17 @@ again:
             }
 
             h->cur_pic_ptr->f.key_frame |=
-                (hx->nal_unit_type == NAL_IDR_SLICE) ||
+                (h->nal_unit_type == NAL_IDR_SLICE) ||
                 (h->sei_recovery_frame_cnt >= 0);
 
-            if (hx->nal_unit_type == NAL_IDR_SLICE ||
+            if (h->nal_unit_type == NAL_IDR_SLICE ||
                 h->recovery_frame == h->frame_num) {
                 h->recovery_frame         = -1;
                 h->cur_pic_ptr->recovered = 1;
             }
             // If we have an IDR, all frames after it in decoded order are
             // "recovered".
-            if (hx->nal_unit_type == NAL_IDR_SLICE)
+            if (h->nal_unit_type == NAL_IDR_SLICE)
                 h->frame_recovered |= FRAME_RECOVERED_IDR;
             h->cur_pic_ptr->recovered |= !!(h->frame_recovered &
                                             FRAME_RECOVERED_IDR);
@@ -1561,7 +1559,7 @@ again:
 
             if (sl->redundant_pic_count == 0 &&
                 (avctx->skip_frame < AVDISCARD_NONREF ||
-                 hx->nal_ref_idc) &&
+                 h->nal_ref_idc) &&
                 (avctx->skip_frame < AVDISCARD_BIDIR  ||
                  sl->slice_type_nos != AV_PICTURE_TYPE_B) &&
                 (avctx->skip_frame < AVDISCARD_NONKEY ||
@@ -1623,7 +1621,7 @@ again:
             break;
         default:
             av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
-                   hx->nal_unit_type, bit_length);
+                   h->nal_unit_type, bit_length);
         }
 
         if (context_count == h->max_contexts) {
@@ -1637,13 +1635,9 @@ again:
             av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
             sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
         } else if (err == 1) {
-            /* Slice could not be decoded in parallel mode, copy down
-             * NAL unit stuff to context 0 and restart. Note that
-             * rbsp_buffer is not transferred, but since we no longer
+            /* Slice could not be decoded in parallel mode, restart. Note
+             * that rbsp_buffer is not transferred, but since we no longer
              * run in parallel mode this should not be an issue. */
-            h->nal_unit_type = hx->nal_unit_type;
-            h->nal_ref_idc   = hx->nal_ref_idc;
-            hx               = h;
             sl = &h->slice_ctx[0];
             goto again;
         }
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index 594c65db8f..a24c7c7253 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -1070,7 +1070,7 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc);
 int ff_pred_weight_table(H264Context *h, H264SliceContext *sl);
 int ff_set_ref_count(H264Context *h, H264SliceContext *sl);
 
-int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Context *h0);
+int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl);
 int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count);
 
 int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index ba5456eb45..40ed93cad9 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -904,34 +904,6 @@ static void init_scan_tables(H264Context *h)
     }
 }
 
-/**
- * Replicate H264 "master" context to thread contexts.
- */
-static int clone_slice(H264Context *dst, H264Context *src)
-{
-    memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset));
-    dst->cur_pic_ptr = src->cur_pic_ptr;
-    dst->cur_pic     = src->cur_pic;
-    dst->linesize    = src->linesize;
-    dst->uvlinesize  = src->uvlinesize;
-    dst->first_field = src->first_field;
-
-    dst->prev_poc_msb          = src->prev_poc_msb;
-    dst->prev_poc_lsb          = src->prev_poc_lsb;
-    dst->prev_frame_num_offset = src->prev_frame_num_offset;
-    dst->prev_frame_num        = src->prev_frame_num;
-    dst->short_ref_count       = src->short_ref_count;
-
-    memcpy(dst->short_ref, src->short_ref, sizeof(dst->short_ref));
-    memcpy(dst->long_ref, src->long_ref, sizeof(dst->long_ref));
-    memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list));
-
-    memcpy(dst->dequant4_coeff, src->dequant4_coeff, sizeof(src->dequant4_coeff));
-    memcpy(dst->dequant8_coeff, src->dequant8_coeff, sizeof(src->dequant8_coeff));
-
-    return 0;
-}
-
 static enum AVPixelFormat get_pixel_format(H264Context *h)
 {
 #define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
@@ -1154,12 +1126,10 @@ static int h264_slice_header_init(H264Context *h, int reinit)
  * This will (re)intialize the decoder and call h264_frame_start() as needed.
  *
  * @param h h264context
- * @param h0 h264 master context (differs from 'h' when doing sliced based
- *           parallel decoding)
  *
  * @return 0 if okay, <0 if an error occurred, 1 if decoding must not be multithreaded
  */
-int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Context *h0)
+int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
 {
     unsigned int first_mb_in_slice;
     unsigned int pps_id;
@@ -1176,12 +1146,12 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
     first_mb_in_slice = get_ue_golomb(&sl->gb);
 
     if (first_mb_in_slice == 0) { // FIXME better field boundary detection
-        if (h0->current_slice && h->cur_pic_ptr && FIELD_PICTURE(h)) {
+        if (h->current_slice && h->cur_pic_ptr && FIELD_PICTURE(h)) {
             ff_h264_field_end(h, sl, 1);
         }
 
-        h0->current_slice = 0;
-        if (!h0->first_field) {
+        h->current_slice = 0;
+        if (!h->first_field) {
             if (h->cur_pic_ptr && !h->droppable) {
                 ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                           h->picture_structure == PICT_BOTTOM_FIELD);
@@ -1205,7 +1175,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
 
     slice_type = golomb_to_pict_type[slice_type];
     if (slice_type == AV_PICTURE_TYPE_I ||
-        (h0->current_slice != 0 && slice_type == h0->last_slice_type)) {
+        (h->current_slice != 0 && slice_type == h->last_slice_type)) {
         default_ref_list_done = 1;
     }
     sl->slice_type = slice_type;
@@ -1225,15 +1195,15 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
         av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id);
         return AVERROR_INVALIDDATA;
     }
-    if (!h0->pps_buffers[pps_id]) {
+    if (!h->pps_buffers[pps_id]) {
         av_log(h->avctx, AV_LOG_ERROR,
                "non-existing PPS %u referenced\n",
                pps_id);
         return AVERROR_INVALIDDATA;
     }
-    h->pps = *h0->pps_buffers[pps_id];
+    h->pps = *h->pps_buffers[pps_id];
 
-    if (!h0->sps_buffers[h->pps.sps_id]) {
+    if (!h->sps_buffers[h->pps.sps_id]) {
         av_log(h->avctx, AV_LOG_ERROR,
                "non-existing SPS %u referenced\n",
                h->pps.sps_id);
@@ -1241,10 +1211,10 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
     }
 
     if (h->pps.sps_id != h->sps.sps_id ||
-        h0->sps_buffers[h->pps.sps_id]->new) {
-        h0->sps_buffers[h->pps.sps_id]->new = 0;
+        h->sps_buffers[h->pps.sps_id]->new) {
+        h->sps_buffers[h->pps.sps_id]->new = 0;
 
-        h->sps = *h0->sps_buffers[h->pps.sps_id];
+        h->sps = *h->sps_buffers[h->pps.sps_id];
 
         if (h->bit_depth_luma    != h->sps.bit_depth_luma ||
             h->chroma_format_idc != h->sps.chroma_format_idc) {
@@ -1293,13 +1263,13 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
     }
 
     if (h->context_initialized && needs_reinit) {
-        if (h != h0) {
+        if (sl != h->slice_ctx) {
             av_log(h->avctx, AV_LOG_ERROR,
                    "changing width %d -> %d / height %d -> %d on "
                    "slice %d\n",
                    h->width, h->avctx->coded_width,
                    h->height, h->avctx->coded_height,
-                   h0->current_slice + 1);
+                   h->current_slice + 1);
             return AVERROR_INVALIDDATA;
         }
 
@@ -1319,7 +1289,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
         }
     }
     if (!h->context_initialized) {
-        if (h != h0) {
+        if (sl != h->slice_ctx) {
             av_log(h->avctx, AV_LOG_ERROR,
                    "Cannot (re-)initialize context during parallel decoding.\n");
             return AVERROR_PATCHWELCOME;
@@ -1336,7 +1306,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
         }
     }
 
-    if (h == h0 && h->dequant_coeff_pps != pps_id) {
+    if (sl == h->slice_ctx && h->dequant_coeff_pps != pps_id) {
         h->dequant_coeff_pps = pps_id;
         h264_init_dequant_tables(h);
     }
@@ -1345,8 +1315,8 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
 
     sl->mb_mbaff       = 0;
     h->mb_aff_frame    = 0;
-    last_pic_structure = h0->picture_structure;
-    last_pic_droppable = h0->droppable;
+    last_pic_structure = h->picture_structure;
+    last_pic_droppable = h->droppable;
     h->droppable       = h->nal_ref_idc == 0;
     if (h->sps.frame_mbs_only_flag) {
         h->picture_structure = PICT_FRAME;
@@ -1362,7 +1332,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
     }
     sl->mb_field_decoding_flag = h->picture_structure != PICT_FRAME;
 
-    if (h0->current_slice != 0) {
+    if (h->current_slice != 0) {
         if (last_pic_structure != h->picture_structure ||
             last_pic_droppable != h->droppable) {
             av_log(h->avctx, AV_LOG_ERROR,
@@ -1371,10 +1341,10 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
             h->picture_structure = last_pic_structure;
             h->droppable         = last_pic_droppable;
             return AVERROR_INVALIDDATA;
-        } else if (!h0->cur_pic_ptr) {
+        } else if (!h->cur_pic_ptr) {
             av_log(h->avctx, AV_LOG_ERROR,
                    "unset cur_pic_ptr on slice %d\n",
-                   h0->current_slice + 1);
+                   h->current_slice + 1);
             return AVERROR_INVALIDDATA;
         }
     } else {
@@ -1401,27 +1371,27 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
          * decode frames as "finished".
          * We have to do that before the "dummy" in-between frame allocation,
          * since that can modify s->current_picture_ptr. */
-        if (h0->first_field) {
-            assert(h0->cur_pic_ptr);
-            assert(h0->cur_pic_ptr->f.buf[0]);
-            assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
+        if (h->first_field) {
+            assert(h->cur_pic_ptr);
+            assert(h->cur_pic_ptr->f.buf[0]);
+            assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
 
             /* figure out if we have a complementary field pair */
             if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
                 /* Previous field is unmatched. Don't display it, but let it
                  * remain for reference if marked as such. */
                 if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
-                    ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
+                    ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                               last_pic_structure == PICT_TOP_FIELD);
                 }
             } else {
-                if (h0->cur_pic_ptr->frame_num != h->frame_num) {
+                if (h->cur_pic_ptr->frame_num != h->frame_num) {
                     /* This and previous field were reference, but had
                      * different frame_nums. Consider this field first in
                      * pair. Throw away previous field except for reference
                      * purposes. */
                     if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
-                        ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
+                        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                                   last_pic_structure == PICT_TOP_FIELD);
                     }
                 } else {
@@ -1454,7 +1424,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
                    h->frame_num, h->prev_frame_num);
             ret = h264_frame_start(h);
             if (ret < 0) {
-                h0->first_field = 0;
+                h->first_field = 0;
                 return ret;
             }
 
@@ -1494,45 +1464,43 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
         /* See if we have a decoded first field looking for a pair...
          * We're using that to see whether to continue decoding in that
          * frame, or to allocate a new one. */
-        if (h0->first_field) {
-            assert(h0->cur_pic_ptr);
-            assert(h0->cur_pic_ptr->f.buf[0]);
-            assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
+        if (h->first_field) {
+            assert(h->cur_pic_ptr);
+            assert(h->cur_pic_ptr->f.buf[0]);
+            assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
 
             /* figure out if we have a complementary field pair */
             if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
                 /* Previous field is unmatched. Don't display it, but let it
                  * remain for reference if marked as such. */
-                h0->cur_pic_ptr = NULL;
-                h0->first_field = FIELD_PICTURE(h);
+                h->cur_pic_ptr = NULL;
+                h->first_field = FIELD_PICTURE(h);
             } else {
-                if (h0->cur_pic_ptr->frame_num != h->frame_num) {
+                if (h->cur_pic_ptr->frame_num != h->frame_num) {
                     /* This and the previous field had different frame_nums.
                      * Consider this field first in pair. Throw away previous
                      * one except for reference purposes. */
-                    h0->first_field = 1;
-                    h0->cur_pic_ptr = NULL;
+                    h->first_field = 1;
+                    h->cur_pic_ptr = NULL;
                 } else {
                     /* Second field in complementary pair */
-                    h0->first_field = 0;
+                    h->first_field = 0;
                 }
             }
         } else {
            /* Frame or first field in a potentially complementary pair */
-            h0->first_field = FIELD_PICTURE(h);
+            h->first_field = FIELD_PICTURE(h);
         }
 
-        if (!FIELD_PICTURE(h) || h0->first_field) {
+        if (!FIELD_PICTURE(h) || h->first_field) {
             if (h264_frame_start(h) < 0) {
-                h0->first_field = 0;
+                h->first_field = 0;
                 return AVERROR_INVALIDDATA;
             }
         } else {
            release_unused_pictures(h, 0);
        }
     }
 
-    if (h != h0 && (ret = clone_slice(h, h0)) < 0)
-        return ret;
-
     h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup
@@ -1617,9 +1585,9 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
     // further down the line. This may break decoding if the first slice is
     // corrupt, thus we only do this if frame-mt is enabled.
     if (h->nal_ref_idc) {
-        ret = ff_h264_decode_ref_pic_marking(h0, &sl->gb,
+        ret = ff_h264_decode_ref_pic_marking(h, &sl->gb,
                                              !(h->avctx->active_thread_type & FF_THREAD_FRAME) ||
-                                             h0->current_slice == 0);
+                                             h->current_slice == 0);
         if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
             return AVERROR_INVALIDDATA;
     }
@@ -1700,19 +1668,19 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
         h->nal_ref_idc == 0))
         sl->deblocking_filter = 0;
 
-    if (sl->deblocking_filter == 1 && h0->max_contexts > 1) {
+    if (sl->deblocking_filter == 1 && h->max_contexts > 1) {
        if (h->avctx->flags2 & CODEC_FLAG2_FAST) {
            /* Cheat slightly for speed:
             * Do not bother to deblock across slices. */
            sl->deblocking_filter = 2;
        } else {
-            h0->max_contexts = 1;
-            if (!h0->single_decode_warning) {
+            h->max_contexts = 1;
+            if (!h->single_decode_warning) {
                av_log(h->avctx, AV_LOG_INFO,
                       "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
-                h0->single_decode_warning = 1;
+                h->single_decode_warning = 1;
            }
-            if (h != h0) {
+            if (sl != h->slice_ctx) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "Deblocking switched inside frame.\n");
                return 1;
@@ -1726,8 +1694,8 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
                  h->pps.chroma_qp_index_offset[1]) +
                 6 * (h->sps.bit_depth_luma - 8);
 
-    h0->last_slice_type = slice_type;
-    sl->slice_num       = ++h0->current_slice;
+    h->last_slice_type = slice_type;
+    sl->slice_num      = ++h->current_slice;
     if (sl->slice_num >= MAX_SLICES) {
         av_log(h->avctx, AV_LOG_ERROR,
                "Too many slices, increase MAX_SLICES and recompile\n");
@@ -2336,7 +2304,6 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
 int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
 {
     AVCodecContext *const avctx = h->avctx;
-    H264Context *hx;
     H264SliceContext *sl;
     int i;
 
@@ -2356,11 +2323,8 @@ int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
                        NULL, context_count, sizeof(h->slice_ctx[0]));
 
         /* pull back stuff from slices to master context */
-        hx                   = h->thread_context[context_count - 1];
         sl                   = &h->slice_ctx[context_count - 1];
         h->mb_y              = sl->mb_y;
-        h->droppable         = hx->droppable;
-        h->picture_structure = hx->picture_structure;
         for (i = 1; i < context_count; i++)
             h->slice_ctx[0].er.error_count += h->slice_ctx[i].er.error_count;
     }