author     Anton Khirnov <anton@khirnov.net>    2016-03-22 13:31:21 +0100
committer  Anton Khirnov <anton@khirnov.net>    2016-04-24 10:06:23 +0200
commit     3176217c60ca7828712985092d9102d331ea4f3d (patch)
tree       1124709788c4b1b3ec4da9cd8e204cc63039cc8f /libavcodec
parent     44d16df413878588659dd8901bba016b5a869fd1 (diff)
download   ffmpeg-3176217c60ca7828712985092d9102d331ea4f3d.tar.gz
h264: decouple h264_ps from the h264 decoder
Make the SPS/PPS parsing independent of the H264Context, to allow
decoupling the parser from the decoder. The change is modelled after the
one done earlier for HEVC.
Move the dequant buffers to the PPS to avoid both complex checks for whether
they have changed and an expensive copy for frame threads.
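After this change the SPS/PPS parsers take an explicit GetBitContext, AVCodecContext and H264ParamSets rather than a full H264Context, so the decoder and the parser can each keep their own refcounted parameter-set storage. Below is a minimal, illustrative sketch (not part of the patch; the helper names parse_ps_nal and free_ps are invented here, and error handling is trimmed) of how a caller drives the decoupled entry points, mirroring the call sites this patch adds to h264.c and h264_parser.c:

    /* Sketch only: condensed from the post-patch call pattern in h264.c and
     * h264_parser.c; assumes the libavcodec-internal headers. */
    #include "h264.h"

    static int parse_ps_nal(AVCodecContext *avctx, H2645NAL *nal, H264ParamSets *ps)
    {
        switch (nal->type) {
        case NAL_SPS:
            /* parsed straight from the NAL's bit reader into ps->sps_list[] */
            return ff_h264_decode_seq_parameter_set(&nal->gb, avctx, ps);
        case NAL_PPS:
            /* nal->size_bits tells the PPS parser how large the payload is */
            return ff_h264_decode_picture_parameter_set(&nal->gb, avctx, ps,
                                                        nal->size_bits);
        default:
            return 0;
        }
    }

    static void free_ps(H264ParamSets *ps)
    {
        int i;
        /* the lists hold AVBufferRef-backed SPS/PPS, so cleanup is a plain unref */
        for (i = 0; i < FF_ARRAY_ELEMS(ps->sps_list); i++)
            av_buffer_unref(&ps->sps_list[i]);
        for (i = 0; i < FF_ARRAY_ELEMS(ps->pps_list); i++)
            av_buffer_unref(&ps->pps_list[i]);
    }

Equivalent unref loops appear in ff_h264_free_context() and in the parser's h264_close() in the diff below.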
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/dxva2_h264.c       |  80
-rw-r--r--  libavcodec/h264.c             |  69
-rw-r--r--  libavcodec/h264.h             |  48
-rw-r--r--  libavcodec/h264_cabac.c       |  25
-rw-r--r--  libavcodec/h264_cavlc.c       |  22
-rw-r--r--  libavcodec/h264_direct.c      |   6
-rw-r--r--  libavcodec/h264_loopfilter.c  |   8
-rw-r--r--  libavcodec/h264_mb.c          |   8
-rw-r--r--  libavcodec/h264_mb_template.c |  20
-rw-r--r--  libavcodec/h264_mvpred.h      |   2
-rw-r--r--  libavcodec/h264_parser.c      |  73
-rw-r--r--  libavcodec/h264_ps.c          | 470
-rw-r--r--  libavcodec/h264_refs.c        |   8
-rw-r--r--  libavcodec/h264_sei.c         |  23
-rw-r--r--  libavcodec/h264_slice.c       | 325
-rw-r--r--  libavcodec/vaapi_h264.c       |  66
-rw-r--r--  libavcodec/vdpau_h264.c       |  54
17 files changed, 677 insertions(+), 630 deletions(-)
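Most of the churn in the hunks below follows a single pattern: the active parameter sets are resolved from the new AVBufferRef-backed lists and cached as pointers in H264ParamSets, after which every former h->sps.X / h->pps.X field access becomes h->ps.sps->X / h->ps.pps->X, and the dequant tables are read through h->ps.pps instead of per-context buffers. A condensed, illustrative sketch of that activation step follows (the function name activate_parameter_sets is invented here; the real code in the h264_parser.c hunk also updates coded_width/height and the pixel format inline):

    /* Sketch only: condensed from the h264_parser.c hunk in this patch;
     * assumes the libavcodec-internal headers. pps_id comes from the slice header. */
    static int activate_parameter_sets(H264ParamSets *ps, unsigned pps_id)
    {
        if (pps_id >= MAX_PPS_COUNT || !ps->pps_list[pps_id])
            return AVERROR_INVALIDDATA;           /* non-existing PPS referenced */
        ps->pps = (const PPS *)ps->pps_list[pps_id]->data;

        if (!ps->sps_list[ps->pps->sps_id])
            return AVERROR_INVALIDDATA;           /* non-existing SPS referenced */
        ps->sps = (SPS *)ps->sps_list[ps->pps->sps_id]->data;

        return 0;
    }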
diff --git a/libavcodec/dxva2_h264.c b/libavcodec/dxva2_h264.c index 2d6fa79152..cd13486b7d 100644 --- a/libavcodec/dxva2_h264.c +++ b/libavcodec/dxva2_h264.c @@ -50,6 +50,8 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext * DXVA_PicParams_H264 *pp) { const H264Picture *current_picture = h->cur_pic_ptr; + const SPS *sps = h->ps.sps; + const PPS *pps = h->ps.pps; int i, j; memset(pp, 0, sizeof(*pp)); @@ -94,30 +96,30 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext * pp->wFrameWidthInMbsMinus1 = h->mb_width - 1; pp->wFrameHeightInMbsMinus1 = h->mb_height - 1; - pp->num_ref_frames = h->sps.ref_frame_count; + pp->num_ref_frames = sps->ref_frame_count; pp->wBitFields = ((h->picture_structure != PICT_FRAME) << 0) | - ((h->sps.mb_aff && + ((sps->mb_aff && (h->picture_structure == PICT_FRAME)) << 1) | - (h->sps.residual_color_transform_flag << 2) | + (sps->residual_color_transform_flag << 2) | /* sp_for_switch_flag (not implemented by Libav) */ (0 << 3) | - (h->sps.chroma_format_idc << 4) | + (sps->chroma_format_idc << 4) | ((h->nal_ref_idc != 0) << 6) | - (h->pps.constrained_intra_pred << 7) | - (h->pps.weighted_pred << 8) | - (h->pps.weighted_bipred_idc << 9) | + (pps->constrained_intra_pred << 7) | + (pps->weighted_pred << 8) | + (pps->weighted_bipred_idc << 9) | /* MbsConsecutiveFlag */ (1 << 11) | - (h->sps.frame_mbs_only_flag << 12) | - (h->pps.transform_8x8_mode << 13) | - ((h->sps.level_idc >= 31) << 14) | + (sps->frame_mbs_only_flag << 12) | + (pps->transform_8x8_mode << 13) | + ((sps->level_idc >= 31) << 14) | /* IntraPicFlag (Modified if we detect a non * intra slice in dxva2_h264_decode_slice) */ (1 << 15); - pp->bit_depth_luma_minus8 = h->sps.bit_depth_luma - 8; - pp->bit_depth_chroma_minus8 = h->sps.bit_depth_chroma - 8; + pp->bit_depth_luma_minus8 = sps->bit_depth_luma - 8; + pp->bit_depth_chroma_minus8 = sps->bit_depth_chroma - 8; if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG) pp->Reserved16Bits = 0; else if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO) @@ -133,28 +135,28 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext * if ((h->picture_structure & PICT_BOTTOM_FIELD) && current_picture->field_poc[1] != INT_MAX) pp->CurrFieldOrderCnt[1] = current_picture->field_poc[1]; - pp->pic_init_qs_minus26 = h->pps.init_qs - 26; - pp->chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0]; - pp->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1]; + pp->pic_init_qs_minus26 = pps->init_qs - 26; + pp->chroma_qp_index_offset = pps->chroma_qp_index_offset[0]; + pp->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1]; pp->ContinuationFlag = 1; - pp->pic_init_qp_minus26 = h->pps.init_qp - 26; - pp->num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1; - pp->num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1; + pp->pic_init_qp_minus26 = pps->init_qp - 26; + pp->num_ref_idx_l0_active_minus1 = pps->ref_count[0] - 1; + pp->num_ref_idx_l1_active_minus1 = pps->ref_count[1] - 1; pp->Reserved8BitsA = 0; pp->frame_num = h->frame_num; - pp->log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4; - pp->pic_order_cnt_type = h->sps.poc_type; - if (h->sps.poc_type == 0) - pp->log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4; - else if (h->sps.poc_type == 1) - pp->delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag; - pp->direct_8x8_inference_flag = 
h->sps.direct_8x8_inference_flag; - pp->entropy_coding_mode_flag = h->pps.cabac; - pp->pic_order_present_flag = h->pps.pic_order_present; - pp->num_slice_groups_minus1 = h->pps.slice_group_count - 1; - pp->slice_group_map_type = h->pps.mb_slice_group_map_type; - pp->deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present; - pp->redundant_pic_cnt_present_flag= h->pps.redundant_pic_cnt_present; + pp->log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4; + pp->pic_order_cnt_type = sps->poc_type; + if (sps->poc_type == 0) + pp->log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4; + else if (sps->poc_type == 1) + pp->delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag; + pp->direct_8x8_inference_flag = sps->direct_8x8_inference_flag; + pp->entropy_coding_mode_flag = pps->cabac; + pp->pic_order_present_flag = pps->pic_order_present; + pp->num_slice_groups_minus1 = pps->slice_group_count - 1; + pp->slice_group_map_type = pps->mb_slice_group_map_type; + pp->deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present; + pp->redundant_pic_cnt_present_flag= pps->redundant_pic_cnt_present; pp->Reserved8BitsB = 0; pp->slice_group_change_rate_minus1= 0; /* XXX not implemented by Libav */ //pp->SliceGroupMap[810]; /* XXX not implemented by Libav */ @@ -167,20 +169,20 @@ static void fill_scaling_lists(const AVCodecContext *avctx, AVDXVAContext *ctx, if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG) { for (i = 0; i < 6; i++) for (j = 0; j < 16; j++) - qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][j]; + qm->bScalingLists4x4[i][j] = pps->scaling_matrix4[i][j]; for (i = 0; i < 64; i++) { - qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][i]; - qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][i]; + qm->bScalingLists8x8[0][i] = pps->scaling_matrix8[0][i]; + qm->bScalingLists8x8[1][i] = pps->scaling_matrix8[3][i]; } } else { for (i = 0; i < 6; i++) for (j = 0; j < 16; j++) - qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][ff_zigzag_scan[j]]; + qm->bScalingLists4x4[i][j] = pps->scaling_matrix4[i][ff_zigzag_scan[j]]; for (i = 0; i < 64; i++) { - qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][ff_zigzag_direct[i]]; - qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][ff_zigzag_direct[i]]; + qm->bScalingLists8x8[0][i] = pps->scaling_matrix8[0][ff_zigzag_direct[i]]; + qm->bScalingLists8x8[1][i] = pps->scaling_matrix8[3][ff_zigzag_direct[i]]; } } } @@ -280,11 +282,11 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice, } } slice->slice_qs_delta = 0; /* XXX not implemented by Libav */ - slice->slice_qp_delta = sl->qscale - h->pps.init_qp; + slice->slice_qp_delta = sl->qscale - h->ps.pps->init_qp; slice->redundant_pic_cnt = sl->redundant_pic_count; if (sl->slice_type == AV_PICTURE_TYPE_B) slice->direct_spatial_mv_pred_flag = sl->direct_spatial_mv_pred; - slice->cabac_init_idc = h->pps.cabac ? sl->cabac_init_idc : 0; + slice->cabac_init_idc = h->ps.pps->cabac ? sl->cabac_init_idc : 0; if (sl->deblocking_filter < 2) slice->disable_deblocking_filter_idc = 1 - sl->deblocking_filter; else diff --git a/libavcodec/h264.c b/libavcodec/h264.c index e9dffa6b92..81c1e81832 100644 --- a/libavcodec/h264.c +++ b/libavcodec/h264.c @@ -206,9 +206,6 @@ int ff_h264_alloc_tables(H264Context *h) h->mb2br_xy[mb_xy] = 8 * (FMO ? 
mb_xy : (mb_xy % (2 * h->mb_stride))); } - if (!h->dequant4_coeff[0]) - ff_h264_init_dequant_tables(h); - return 0; fail: @@ -397,7 +394,6 @@ static int h264_init_context(AVCodecContext *avctx, H264Context *h) int i; h->avctx = avctx; - h->dequant_coeff_pps = -1; h->picture_structure = PICT_FRAME; h->slice_context_count = 1; @@ -474,9 +470,9 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx) } } - if (h->sps.bitstream_restriction_flag && - h->avctx->has_b_frames < h->sps.num_reorder_frames) { - h->avctx->has_b_frames = h->sps.num_reorder_frames; + if (h->ps.sps && h->ps.sps->bitstream_restriction_flag && + h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) { + h->avctx->has_b_frames = h->ps.sps->num_reorder_frames; h->low_delay = 0; } @@ -520,6 +516,7 @@ static int decode_init_thread_copy(AVCodecContext *avctx) */ static void decode_postinit(H264Context *h, int setup_finished) { + const SPS *sps = h->ps.sps; H264Picture *out = h->cur_pic_ptr; H264Picture *cur = h->cur_pic_ptr; int i, pics, out_of_order, out_idx; @@ -547,7 +544,7 @@ static void decode_postinit(H264Context *h, int setup_finished) /* Prioritize picture timing SEI information over used * decoding process if it exists. */ - if (h->sps.pic_struct_present_flag) { + if (sps->pic_struct_present_flag) { switch (h->sei_pic_struct) { case SEI_PIC_STRUCT_FRAME: break; @@ -591,7 +588,7 @@ static void decode_postinit(H264Context *h, int setup_finished) /* Derive top_field_first from field pocs. */ cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1]; } else { - if (cur->f->interlaced_frame || h->sps.pic_struct_present_flag) { + if (cur->f->interlaced_frame || sps->pic_struct_present_flag) { /* Use picture timing SEI information. Even if it is a * information of a past frame, better than nothing. 
*/ if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM || @@ -684,9 +681,9 @@ static void decode_postinit(H264Context *h, int setup_finished) // FIXME do something with unavailable reference frames /* Sort B-frames into display order */ - if (h->sps.bitstream_restriction_flag || + if (sps->bitstream_restriction_flag || h->avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) { - h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, h->sps.num_reorder_frames); + h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames); } h->low_delay = !h->avctx->has_b_frames; @@ -746,8 +743,8 @@ static void decode_postinit(H264Context *h, int setup_finished) out_of_order = !out->f->key_frame && !h->mmco_reset && (out->poc < h->next_outputed_poc); - if (h->sps.bitstream_restriction_flag && - h->avctx->has_b_frames >= h->sps.num_reorder_frames) { + if (sps->bitstream_restriction_flag && + h->avctx->has_b_frames >= sps->num_reorder_frames) { } else if (out_of_order && pics - 1 == h->avctx->has_b_frames && h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) { if (invalid + cnt < MAX_DELAYED_PIC_COUNT) { @@ -862,15 +859,16 @@ static void flush_dpb(AVCodecContext *avctx) int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc) { - const int max_frame_num = 1 << h->sps.log2_max_frame_num; + const SPS *sps = h->ps.sps; + const int max_frame_num = 1 << sps->log2_max_frame_num; int field_poc[2]; h->frame_num_offset = h->prev_frame_num_offset; if (h->frame_num < h->prev_frame_num) h->frame_num_offset += max_frame_num; - if (h->sps.poc_type == 0) { - const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb; + if (sps->poc_type == 0) { + const int max_poc_lsb = 1 << sps->log2_max_poc_lsb; if (h->poc_lsb < h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2) @@ -884,11 +882,11 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc) field_poc[1] = h->poc_msb + h->poc_lsb; if (h->picture_structure == PICT_FRAME) field_poc[1] += h->delta_poc_bottom; - } else if (h->sps.poc_type == 1) { + } else if (sps->poc_type == 1) { int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc; int i; - if (h->sps.poc_cycle_length != 0) + if (sps->poc_cycle_length != 0) abs_frame_num = h->frame_num_offset + h->frame_num; else abs_frame_num = 0; @@ -897,25 +895,25 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc) abs_frame_num--; expected_delta_per_poc_cycle = 0; - for (i = 0; i < h->sps.poc_cycle_length; i++) + for (i = 0; i < sps->poc_cycle_length; i++) // FIXME integrate during sps parse - expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i]; + expected_delta_per_poc_cycle += sps->offset_for_ref_frame[i]; if (abs_frame_num > 0) { - int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length; - int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length; + int poc_cycle_cnt = (abs_frame_num - 1) / sps->poc_cycle_length; + int frame_num_in_poc_cycle = (abs_frame_num - 1) % sps->poc_cycle_length; expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle; for (i = 0; i <= frame_num_in_poc_cycle; i++) - expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i]; + expectedpoc = expectedpoc + sps->offset_for_ref_frame[i]; } else expectedpoc = 0; if (h->nal_ref_idc == 0) - expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic; + expectedpoc = expectedpoc + sps->offset_for_non_ref_pic; field_poc[0] = expectedpoc + h->delta_poc[0]; - field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field; + field_poc[1] = field_poc[0] + 
sps->offset_for_top_to_bottom_field; if (h->picture_structure == PICT_FRAME) field_poc[1] += h->delta_poc[1]; @@ -945,7 +943,7 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc) * * @return profile as defined by FF_PROFILE_H264_* */ -int ff_h264_get_profile(SPS *sps) +int ff_h264_get_profile(const SPS *sps) { int profile = sps->profile_idc; @@ -1067,7 +1065,7 @@ again: if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0) { h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) & - ((1 << h->sps.log2_max_frame_num) - 1); + ((1 << h->ps.sps->log2_max_frame_num) - 1); } h->cur_pic_ptr->f->key_frame |= @@ -1121,14 +1119,13 @@ again: goto end; break; case NAL_SPS: - h->gb = nal->gb; - ret = ff_h264_decode_seq_parameter_set(h); + ret = ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) goto end; break; case NAL_PPS: - h->gb = nal->gb; - ret = ff_h264_decode_picture_parameter_set(h, nal->size_bits); + ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps, + nal->size_bits); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) goto end; break; @@ -1201,14 +1198,14 @@ static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src) if (ret < 0) return ret; - if (!h->sps.crop) + if (!h->ps.sps || !h->ps.sps->crop) return 0; for (i = 0; i < 3; i++) { int hshift = (i > 0) ? h->chroma_x_shift : 0; int vshift = (i > 0) ? h->chroma_y_shift : 0; - int off = ((h->sps.crop_left >> hshift) << h->pixel_shift) + - (h->sps.crop_top >> vshift) * dst->linesize[i]; + int off = ((h->ps.sps->crop_left >> hshift) << h->pixel_shift) + + (h->ps.sps->crop_top >> vshift) * dst->linesize[i]; dst->data[i] += off; } return 0; @@ -1321,10 +1318,10 @@ av_cold void ff_h264_free_context(H264Context *h) h->nb_slice_ctx = 0; for (i = 0; i < MAX_SPS_COUNT; i++) - av_freep(h->sps_buffers + i); + av_buffer_unref(&h->ps.sps_list[i]); for (i = 0; i < MAX_PPS_COUNT; i++) - av_freep(h->pps_buffers + i); + av_buffer_unref(&h->ps.pps_list[i]); ff_h2645_packet_uninit(&h->pkt); } diff --git a/libavcodec/h264.h b/libavcodec/h264.h index 86625f3a13..6a026aa818 100644 --- a/libavcodec/h264.h +++ b/libavcodec/h264.h @@ -28,6 +28,7 @@ #ifndef AVCODEC_H264_H #define AVCODEC_H264_H +#include "libavutil/buffer.h" #include "libavutil/intreadwrite.h" #include "libavutil/thread.h" #include "cabac.h" @@ -92,11 +93,11 @@ #define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h)) #ifndef CABAC -#define CABAC(h) h->pps.cabac +#define CABAC(h) h->ps.pps->cabac #endif -#define CHROMA422(h) (h->sps.chroma_format_idc == 2) -#define CHROMA444(h) (h->sps.chroma_format_idc == 3) +#define CHROMA422(h) (h->ps.sps->chroma_format_idc == 2) +#define CHROMA444(h) (h->ps.sps->chroma_format_idc == 3) #define EXTENDED_SAR 255 @@ -214,7 +215,6 @@ typedef struct SPS { int bit_depth_chroma; ///< bit_depth_chroma_minus8 + 8 int residual_color_transform_flag; ///< residual_colour_transform_flag int constraint_set_flags; ///< constraint_set[0-3]_flag - int new; ///< flag to keep track if the decoder context needs re-init due to changed SPS } SPS; /** @@ -240,8 +240,23 @@ typedef struct PPS { uint8_t scaling_matrix8[6][64]; uint8_t chroma_qp_table[2][64]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table int chroma_qp_diff; + + uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16]; + uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64]; + uint32_t(*dequant4_coeff[6])[16]; + uint32_t(*dequant8_coeff[6])[64]; } PPS; +typedef struct 
H264ParamSets { + AVBufferRef *sps_list[MAX_SPS_COUNT]; + AVBufferRef *pps_list[MAX_PPS_COUNT]; + + /* currently active parameters sets */ + const PPS *pps; + // FIXME this should properly be const + SPS *sps; +} H264ParamSets; + /** * Memory management control operation opcode. */ @@ -506,14 +521,6 @@ typedef struct H264Context { uint32_t *mb2br_xy; int b_stride; // FIXME use s->b4_stride - SPS sps; ///< current sps - PPS pps; ///< current pps - - uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16]; // FIXME should these be moved down? - uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64]; - uint32_t(*dequant4_coeff[6])[16]; - uint32_t(*dequant8_coeff[6])[64]; - uint16_t *slice_table; ///< slice_table_base + 2*mb_stride + 1 // interlacing specific flags @@ -566,10 +573,7 @@ typedef struct H264Context { int bit_depth_luma; ///< luma bit depth from sps to detect changes int chroma_format_idc; ///< chroma format from sps to detect changes - SPS *sps_buffers[MAX_SPS_COUNT]; - PPS *pps_buffers[MAX_PPS_COUNT]; - - int dequant_coeff_pps; ///< reinit tables when pps changes + H264ParamSets ps; uint16_t *slice_table_base; @@ -756,17 +760,19 @@ int ff_h264_decode_sei(H264Context *h); /** * Decode SPS */ -int ff_h264_decode_seq_parameter_set(H264Context *h); +int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx, + H264ParamSets *ps); /** * compute profile from sps */ -int ff_h264_get_profile(SPS *sps); +int ff_h264_get_profile(const SPS *sps); /** * Decode PPS */ -int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length); +int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx, + H264ParamSets *ps, int bit_length); /** * Free any data that may have been allocated in the H264 context @@ -912,7 +918,7 @@ static av_always_inline uint16_t pack8to16(int a, int b) */ static av_always_inline int get_chroma_qp(const H264Context *h, int t, int qscale) { - return h->pps.chroma_qp_table[t][qscale]; + return h->ps.pps->chroma_qp_table[t][qscale]; } /** @@ -1035,7 +1041,7 @@ static av_always_inline void write_back_motion(const H264Context *h, static av_always_inline int get_dct8x8_allowed(const H264Context *h, H264SliceContext *sl) { - if (h->sps.direct_8x8_inference_flag) + if (h->ps.sps->direct_8x8_inference_flag) return !(AV_RN64A(sl->sub_mb_type) & ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) * 0x0001000100010001ULL)); diff --git a/libavcodec/h264_cabac.c b/libavcodec/h264_cabac.c index 589a021e1f..f831a68846 100644 --- a/libavcodec/h264_cabac.c +++ b/libavcodec/h264_cabac.c @@ -1265,7 +1265,7 @@ void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl) { int i; const int8_t (*tab)[2]; - const int slice_qp = av_clip(sl->qscale - 6*(h->sps.bit_depth_luma-8), 0, 51); + const int slice_qp = av_clip(sl->qscale - 6*(h->ps.sps->bit_depth_luma-8), 0, 51); if (sl->slice_type_nos == AV_PICTURE_TYPE_I) tab = cabac_context_init_I; else tab = cabac_context_init_PB[sl->cabac_init_idc]; @@ -1870,7 +1870,7 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2 decode_cabac_residual_dc(h, sl, sl->mb_luma_dc[p], ctx_cat[0][p], LUMA_DC_BLOCK_INDEX+p, scan, 16); if( cbp&15 ) { - qmul = h->dequant4_coeff[p][qscale]; + qmul = h->ps.pps->dequant4_coeff[p][qscale]; for( i4x4 = 0; i4x4 < 16; i4x4++ ) { const int index = 16*p + i4x4; decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[1][p], index, scan + 1, qmul, 15); @@ -1885,9 +1885,9 @@ static av_always_inline void decode_cabac_luma_residual(const 
H264Context *h, H2 if( IS_8x8DCT(mb_type) ) { const int index = 16*p + 4*i8x8; decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[3][p], index, - scan8x8, h->dequant8_coeff[cqm][qscale], 64); + scan8x8, h->ps.pps->dequant8_coeff[cqm][qscale], 64); } else { - qmul = h->dequant4_coeff[cqm][qscale]; + qmul = h->ps.pps->dequant4_coeff[cqm][qscale]; for( i4x4 = 0; i4x4 < 4; i4x4++ ) { const int index = 16*p + 4*i8x8 + i4x4; //START_TIMER @@ -1908,10 +1908,11 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2 */ int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl) { + const SPS *sps = h->ps.sps; int mb_xy; int mb_type, partition_count, cbp = 0; - int dct8x8_allowed= h->pps.transform_8x8_mode; - int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2; + int dct8x8_allowed= h->ps.pps->transform_8x8_mode; + int decode_chroma = sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2; const int pixel_shift = h->pixel_shift; mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride; @@ -2021,8 +2022,8 @@ decode_intra_mb: h->slice_table[mb_xy] = sl->slice_num; if(IS_INTRA_PCM(mb_type)) { - const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] * - h->sps.bit_depth_luma >> 3; + const int mb_size = ff_h264_mb_sizes[sps->chroma_format_idc] * + sps->bit_depth_luma >> 3; const uint8_t *ptr; // We assume these blocks are very rare so we do not optimize it. @@ -2206,7 +2207,7 @@ decode_intra_mb: ff_h264_pred_direct_motion(h, sl, &mb_type); fill_rectangle(sl->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 2); fill_rectangle(sl->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 2); - dct8x8_allowed &= h->sps.direct_8x8_inference_flag; + dct8x8_allowed &= sps->direct_8x8_inference_flag; } else { int list, i; if(IS_16X16(mb_type)){ @@ -2365,7 +2366,7 @@ decode_intra_mb: if(get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + (sl->last_qscale_diff != 0)])){ int val = 1; int ctx= 2; - const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8); + const int max_qp = 51 + 6*(sps->bit_depth_luma-8); while( get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + ctx] ) ) { ctx= 3; @@ -2408,7 +2409,7 @@ decode_intra_mb: int c, i, i8x8; for( c = 0; c < 2; c++ ) { int16_t *mb = sl->mb + (16*(16 + 16*c) << pixel_shift); - qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]]; + qmul = h->ps.pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]]; for (i8x8 = 0; i8x8 < 2; i8x8++) { for (i = 0; i < 4; i++) { const int index = 16 + 16 * c + 8*i8x8 + i; @@ -2432,7 +2433,7 @@ decode_intra_mb: if( cbp&0x20 ) { int c, i; for( c = 0; c < 2; c++ ) { - qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]]; + qmul = h->ps.pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 
0:3)][sl->chroma_qp[c]]; for( i = 0; i < 4; i++ ) { const int index = 16 + 16 * c + i; decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), 4, index, scan + 1, qmul, 15); diff --git a/libavcodec/h264_cavlc.c b/libavcodec/h264_cavlc.c index bdd9f73975..10511fba05 100644 --- a/libavcodec/h264_cavlc.c +++ b/libavcodec/h264_cavlc.c @@ -649,7 +649,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl, for(i4x4=0; i4x4<4; i4x4++){ const int index= i4x4 + 4*i8x8 + p*16; if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift), - index, scan + 1, h->dequant4_coeff[p][qscale], 15) < 0 ){ + index, scan + 1, h->ps.pps->dequant4_coeff[p][qscale], 15) < 0 ){ return -1; } } @@ -671,7 +671,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl, for(i4x4=0; i4x4<4; i4x4++){ const int index= i4x4 + 4*i8x8 + p*16; if( decode_residual(h, sl, gb, buf, index, scan8x8+16*i4x4, - h->dequant8_coeff[cqm][qscale], 16) < 0 ) + h->ps.pps->dequant8_coeff[cqm][qscale], 16) < 0 ) return -1; } nnz = &sl->non_zero_count_cache[scan8[4 * i8x8 + p * 16]]; @@ -681,7 +681,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl, for(i4x4=0; i4x4<4; i4x4++){ const int index= i4x4 + 4*i8x8 + p*16; if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift), index, - scan, h->dequant4_coeff[cqm][qscale], 16) < 0 ){ + scan, h->ps.pps->dequant4_coeff[cqm][qscale], 16) < 0 ){ return -1; } new_cbp |= sl->non_zero_count_cache[scan8[index]] << i8x8; @@ -701,8 +701,8 @@ int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl) int mb_xy; int partition_count; unsigned int mb_type, cbp; - int dct8x8_allowed= h->pps.transform_8x8_mode; - int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2; + int dct8x8_allowed= h->ps.pps->transform_8x8_mode; + int decode_chroma = h->ps.sps->chroma_format_idc == 1 || h->ps.sps->chroma_format_idc == 2; const int pixel_shift = h->pixel_shift; mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride; @@ -768,8 +768,8 @@ decode_intra_mb: h->slice_table[mb_xy] = sl->slice_num; if(IS_INTRA_PCM(mb_type)){ - const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] * - h->sps.bit_depth_luma; + const int mb_size = ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] * + h->ps.sps->bit_depth_luma; // We assume these blocks are very rare so we do not optimize it. sl->intra_pcm_ptr = align_get_bits(&sl->gb); @@ -942,7 +942,7 @@ decode_intra_mb: } }else if(IS_DIRECT(mb_type)){ ff_h264_pred_direct_motion(h, sl, &mb_type); - dct8x8_allowed &= h->sps.direct_8x8_inference_flag; + dct8x8_allowed &= h->ps.sps->direct_8x8_inference_flag; }else{ int list, mx, my, i; //FIXME we should set ref_idx_l? to 0 if we use that later ... @@ -1092,7 +1092,7 @@ decode_intra_mb: int ret; GetBitContext *gb = &sl->gb; const uint8_t *scan, *scan8x8; - const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8); + const int max_qp = 51 + 6 * (h->ps.sps->bit_depth_luma - 8); if(IS_INTERLACED(mb_type)){ scan8x8 = sl->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0; @@ -1141,7 +1141,7 @@ decode_intra_mb: if(cbp&0x20){ for(chroma_idx=0; chroma_idx<2; chroma_idx++){ - const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]]; + const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 
0:3)][sl->chroma_qp[chroma_idx]]; int16_t *mb = sl->mb + (16*(16 + 16*chroma_idx) << pixel_shift); for (i8x8 = 0; i8x8 < 2; i8x8++) { for (i4x4 = 0; i4x4 < 4; i4x4++) { @@ -1167,7 +1167,7 @@ decode_intra_mb: if(cbp&0x20){ for(chroma_idx=0; chroma_idx<2; chroma_idx++){ - const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]]; + const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]]; for(i4x4=0; i4x4<4; i4x4++){ const int index= 16 + 16*chroma_idx + i4x4; if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift), index, scan + 1, qmul, 15) < 0){ diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c index a7966e15e9..391f8f1d6a 100644 --- a/libavcodec/h264_direct.c +++ b/libavcodec/h264_direct.c @@ -310,7 +310,7 @@ single_col: *mb_type |= MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16)); } else { - if (!h->sps.direct_8x8_inference_flag) { + if (!h->ps.sps->direct_8x8_inference_flag) { /* FIXME: Save sub mb types from previous frames (or derive * from MVs) so we know exactly what block size to use. */ sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */ @@ -533,7 +533,7 @@ single_col: *mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16)); } else { - if (!h->sps.direct_8x8_inference_flag) { + if (!h->ps.sps->direct_8x8_inference_flag) { /* FIXME: save sub mb types from previous frames (or derive * from MVs) so we know exactly what block size to use */ sub_mb_type = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 | @@ -574,7 +574,7 @@ single_col: if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) { int y_shift = 2 * !IS_INTERLACED(*mb_type); - assert(h->sps.direct_8x8_inference_flag); + assert(h->ps.sps->direct_8x8_inference_flag); for (i8 = 0; i8 < 4; i8++) { const int x8 = i8 & 1; diff --git a/libavcodec/h264_loopfilter.c b/libavcodec/h264_loopfilter.c index ae93681246..fccfc66c1c 100644 --- a/libavcodec/h264_loopfilter.c +++ b/libavcodec/h264_loopfilter.c @@ -252,7 +252,7 @@ static av_always_inline void h264_filter_mb_fast_internal(const H264Context *h, int left_type = sl->left_type[LTOP]; int top_type = sl->top_type; - int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); + int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8); int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset; int b = 52 + sl->slice_beta_offset - qp_bd_offset; @@ -422,7 +422,7 @@ void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, unsigned int linesize, unsigned int uvlinesize) { assert(!FRAME_MBAFF(h)); - if(!h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) { + if(!h->h264dsp.h264_loop_filter_strength || h->ps.pps->chroma_qp_diff) { ff_h264_filter_mb(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize); return; } @@ -722,7 +722,7 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4; int first_vertical_edge_done = 0; int chroma = !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY)); - int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); + int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8); int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset; int b = 52 + sl->slice_beta_offset - qp_bd_offset; @@ -765,7 +765,7 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, bS[i] = 4; else{ bS[i] = 1 + !!(sl->non_zero_count_cache[12+8*(i>>1)] | - ((!h->pps.cabac && IS_8x8DCT(mbn_type)) ? 
+ ((!h->ps.pps->cabac && IS_8x8DCT(mbn_type)) ? (h->cbp_table[mbn_xy] & (((MB_FIELD(sl) ? (i&2) : (mb_y&1)) ? 8 : 2) << 12)) : h->non_zero_count[mbn_xy][ off[i] ])); diff --git a/libavcodec/h264_mb.c b/libavcodec/h264_mb.c index 6006892919..b6773e63b6 100644 --- a/libavcodec/h264_mb.c +++ b/libavcodec/h264_mb.c @@ -634,7 +634,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h, for (i = 0; i < 16; i += 4) { uint8_t *const ptr = dest_y + block_offset[i]; const int dir = sl->intra4x4_pred_mode_cache[scan8[i]]; - if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) { + if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) { h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize); } else { const int nnz = sl->non_zero_count_cache[scan8[i + p * 16]]; @@ -660,7 +660,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h, uint8_t *const ptr = dest_y + block_offset[i]; const int dir = sl->intra4x4_pred_mode_cache[scan8[i]]; - if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) { + if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) { h->hpc.pred4x4_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize); } else { uint8_t *topright; @@ -699,7 +699,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h, if (!transform_bypass) h->h264dsp.h264_luma_dc_dequant_idct(sl->mb + (p * 256 << pixel_shift), sl->mb_luma_dc[p], - h->dequant4_coeff[p][qscale][0]); + h->ps.pps->dequant4_coeff[p][qscale][0]); else { static const uint8_t dc_mapping[16] = { 0 * 16, 1 * 16, 4 * 16, 5 * 16, @@ -731,7 +731,7 @@ static av_always_inline void hl_decode_mb_idct_luma(const H264Context *h, H264Sl if (!IS_INTRA4x4(mb_type)) { if (IS_INTRA16x16(mb_type)) { if (transform_bypass) { - if (h->sps.profile_idc == 244 && + if (h->ps.sps->profile_idc == 244 && (sl->intra16x16_pred_mode == VERT_PRED8x8 || sl->intra16x16_pred_mode == HOR_PRED8x8)) { h->hpc.pred16x16_add[sl->intra16x16_pred_mode](dest_y, block_offset, diff --git a/libavcodec/h264_mb_template.c b/libavcodec/h264_mb_template.c index 2c83343ba6..7da4f416bf 100644 --- a/libavcodec/h264_mb_template.c +++ b/libavcodec/h264_mb_template.c @@ -48,7 +48,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex int linesize, uvlinesize /*dct_offset*/; int i, j; const int *block_offset = &h->block_offset[0]; - const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps.transform_bypass); + const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->ps.sps->transform_bypass); void (*idct_add)(uint8_t *dst, int16_t *block, int stride); const int block_h = 16 >> h->chroma_y_shift; const int chroma422 = CHROMA422(h); @@ -97,11 +97,11 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex if (!SIMPLE && IS_INTRA_PCM(mb_type)) { if (PIXEL_SHIFT) { - const int bit_depth = h->sps.bit_depth_luma; + const int bit_depth = h->ps.sps->bit_depth_luma; int j; GetBitContext gb; init_get_bits(&gb, sl->intra_pcm_ptr, - ff_h264_mb_sizes[h->sps.chroma_format_idc] * bit_depth); + ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] * bit_depth); for (i = 0; i < 16; i++) { uint16_t *tmp_y = (uint16_t *)(dest_y + i * linesize); @@ -109,7 +109,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex tmp_y[j] = get_bits(&gb, bit_depth); } if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) { - if (!h->sps.chroma_format_idc) { + if 
(!h->ps.sps->chroma_format_idc) { for (i = 0; i < block_h; i++) { uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize); for (j = 0; j < 8; j++) @@ -137,7 +137,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex for (i = 0; i < 16; i++) memcpy(dest_y + i * linesize, sl->intra_pcm_ptr + i * 16, 16); if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) { - if (!h->sps.chroma_format_idc) { + if (!h->ps.sps->chroma_format_idc) { for (i = 0; i < block_h; i++) { memset(dest_cb + i * uvlinesize, 128, 8); memset(dest_cr + i * uvlinesize, 128, 8); @@ -193,7 +193,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex (sl->cbp & 0x30)) { uint8_t *dest[2] = { dest_cb, dest_cr }; if (transform_bypass) { - if (IS_INTRA(mb_type) && h->sps.profile_idc == 244 && + if (IS_INTRA(mb_type) && h->ps.sps->profile_idc == 244 && (sl->chroma_pred_mode == VERT_PRED8x8 || sl->chroma_pred_mode == HOR_PRED8x8)) { h->hpc.pred8x8_add[sl->chroma_pred_mode](dest[0], @@ -234,10 +234,10 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex } if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 0]]) h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 1 << PIXEL_SHIFT), - h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]); + h->ps.pps->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]); if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 1]]) h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 2 << PIXEL_SHIFT), - h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]); + h->ps.pps->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]); h->h264dsp.h264_idct_add8(dest, block_offset, sl->mb, uvlinesize, sl->non_zero_count_cache); @@ -262,7 +262,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo int linesize; int i, j, p; const int *block_offset = &h->block_offset[0]; - const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps.transform_bypass); + const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->ps.sps->transform_bypass); const int plane_count = (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) ? 3 : 1; for (p = 0; p < plane_count; p++) { @@ -304,7 +304,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo if (!SIMPLE && IS_INTRA_PCM(mb_type)) { if (PIXEL_SHIFT) { - const int bit_depth = h->sps.bit_depth_luma; + const int bit_depth = h->ps.sps->bit_depth_luma; GetBitContext gb; init_get_bits(&gb, sl->intra_pcm_ptr, 768 * bit_depth); diff --git a/libavcodec/h264_mvpred.h b/libavcodec/h264_mvpred.h index 5e8f237f63..d97dac0e6a 100644 --- a/libavcodec/h264_mvpred.h +++ b/libavcodec/h264_mvpred.h @@ -464,7 +464,7 @@ static void fill_decode_caches(const H264Context *h, H264SliceContext *sl, int m if (!IS_SKIP(mb_type)) { if (IS_INTRA(mb_type)) { - int type_mask = h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1; + int type_mask = h->ps.pps->constrained_intra_pred ? 
IS_INTRA(-1) : -1; sl->topleft_samples_available = sl->top_samples_available = sl->left_samples_available = 0xFFFF; diff --git a/libavcodec/h264_parser.c b/libavcodec/h264_parser.c index 9f82a370de..cd37d31fbd 100644 --- a/libavcodec/h264_parser.c +++ b/libavcodec/h264_parser.c @@ -45,6 +45,7 @@ typedef struct H264ParseContext { H264Context h; ParseContext pc; + H264ParamSets ps; int got_first; } H264ParseContext; @@ -115,13 +116,13 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb) int list_count, ref_count[2]; - if (h->pps.redundant_pic_cnt_present) + if (p->ps.pps->redundant_pic_cnt_present) get_ue_golomb(gb); // redundant_pic_count if (slice_type_nos == AV_PICTURE_TYPE_B) get_bits1(gb); // direct_spatial_mv_pred - if (ff_h264_parse_ref_count(&list_count, ref_count, gb, &h->pps, + if (ff_h264_parse_ref_count(&list_count, ref_count, gb, p->ps.pps, slice_type_nos, h->picture_structure) < 0) return AVERROR_INVALIDDATA; @@ -153,9 +154,9 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb) } } - if ((h->pps.weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) || - (h->pps.weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B)) - ff_h264_pred_weight_table(gb, &h->sps, ref_count, slice_type_nos, + if ((p->ps.pps->weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) || + (p->ps.pps->weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B)) + ff_h264_pred_weight_table(gb, p->ps.sps, ref_count, slice_type_nos, &pwt); if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag @@ -220,6 +221,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, return 0; for (;;) { + const SPS *sps; int src_length, consumed; buf = avpriv_find_start_code(buf, buf_end, &state); if (buf >= buf_end) @@ -260,10 +262,11 @@ static inline int parse_nal_units(AVCodecParserContext *s, switch (h->nal_unit_type) { case NAL_SPS: - ff_h264_decode_seq_parameter_set(h); + ff_h264_decode_seq_parameter_set(&nal.gb, avctx, &p->ps); break; case NAL_PPS: - ff_h264_decode_picture_parameter_set(h, h->gb.size_in_bits); + ff_h264_decode_picture_parameter_set(&nal.gb, avctx, &p->ps, + nal.size_bits); break; case NAL_SEI: ff_h264_decode_sei(h); @@ -290,30 +293,35 @@ static inline int parse_nal_units(AVCodecParserContext *s, "pps_id %u out of range\n", pps_id); goto fail; } - if (!h->pps_buffers[pps_id]) { + if (!p->ps.pps_list[pps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", pps_id); goto fail; } - h->pps = *h->pps_buffers[pps_id]; - if (!h->sps_buffers[h->pps.sps_id]) { + p->ps.pps = (const PPS*)p->ps.pps_list[pps_id]->data; + if (!p->ps.sps_list[p->ps.pps->sps_id]) { av_log(h->avctx, AV_LOG_ERROR, - "non-existing SPS %u referenced\n", h->pps.sps_id); + "non-existing SPS %u referenced\n", p->ps.pps->sps_id); goto fail; } - h->sps = *h->sps_buffers[h->pps.sps_id]; - h->frame_num = get_bits(&nal.gb, h->sps.log2_max_frame_num); + p->ps.sps = (SPS*)p->ps.sps_list[p->ps.pps->sps_id]->data; - s->coded_width = 16 * h->sps.mb_width; - s->coded_height = 16 * h->sps.mb_height; - s->width = s->coded_width - (h->sps.crop_right + h->sps.crop_left); - s->height = s->coded_height - (h->sps.crop_top + h->sps.crop_bottom); + h->ps.sps = p->ps.sps; + h->ps.pps = p->ps.pps; + sps = p->ps.sps; + + h->frame_num = get_bits(&nal.gb, sps->log2_max_frame_num); + + s->coded_width = 16 * sps->mb_width; + s->coded_height = 16 * sps->mb_height; + s->width = s->coded_width - (sps->crop_right + sps->crop_left); + s->height = s->coded_height - (sps->crop_top + 
sps->crop_bottom); if (s->width <= 0 || s->height <= 0) { s->width = s->coded_width; s->height = s->coded_height; } - switch (h->sps.bit_depth_luma) { + switch (sps->bit_depth_luma) { case 9: if (CHROMA444(h)) s->format = AV_PIX_FMT_YUV444P9; else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P9; @@ -333,10 +341,10 @@ static inline int parse_nal_units(AVCodecParserContext *s, s->format = AV_PIX_FMT_NONE; } - avctx->profile = ff_h264_get_profile(&h->sps); - avctx->level = h->sps.level_idc; + avctx->profile = ff_h264_get_profile(sps); + avctx->level = sps->level_idc; - if (h->sps.frame_mbs_only_flag) { + if (sps->frame_mbs_only_flag) { h->picture_structure = PICT_FRAME; } else { if (get_bits1(&nal.gb)) { // field_pic_flag @@ -348,19 +356,19 @@ static inline int parse_nal_units(AVCodecParserContext *s, if (h->nal_unit_type == NAL_IDR_SLICE) get_ue_golomb(&nal.gb); /* idr_pic_id */ - if (h->sps.poc_type == 0) { - h->poc_lsb = get_bits(&nal.gb, h->sps.log2_max_poc_lsb); + if (sps->poc_type == 0) { + h->poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb); - if (h->pps.pic_order_present == 1 && + if (p->ps.pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc_bottom = get_se_golomb(&nal.gb); } - if (h->sps.poc_type == 1 && - !h->sps.delta_pic_order_always_zero_flag) { + if (sps->poc_type == 1 && + !sps->delta_pic_order_always_zero_flag) { h->delta_poc[0] = get_se_golomb(&nal.gb); - if (h->pps.pic_order_present == 1 && + if (p->ps.pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc[1] = get_se_golomb(&nal.gb); } @@ -394,7 +402,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, } } - if (h->sps.pic_struct_present_flag) { + if (sps->pic_struct_present_flag) { switch (h->sei_pic_struct) { case SEI_PIC_STRUCT_TOP_FIELD: case SEI_PIC_STRUCT_BOTTOM_FIELD: @@ -425,7 +433,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, if (h->picture_structure == PICT_FRAME) { s->picture_structure = AV_PICTURE_STRUCTURE_FRAME; - if (h->sps.pic_struct_present_flag) { + if (sps->pic_struct_present_flag) { switch (h->sei_pic_struct) { case SEI_PIC_STRUCT_TOP_BOTTOM: case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: @@ -566,9 +574,16 @@ static void h264_close(AVCodecParserContext *s) H264ParseContext *p = s->priv_data; H264Context *h = &p->h; ParseContext *pc = &p->pc; + int i; av_free(pc->buffer); ff_h264_free_context(h); + + for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++) + av_buffer_unref(&p->ps.sps_list[i]); + + for (i = 0; i < FF_ARRAY_ELEMS(p->ps.pps_list); i++) + av_buffer_unref(&p->ps.pps_list[i]); } static av_cold int init(AVCodecParserContext *s) diff --git a/libavcodec/h264_ps.c b/libavcodec/h264_ps.c index d77fabf284..99ede76920 100644 --- a/libavcodec/h264_ps.c +++ b/libavcodec/h264_ps.c @@ -104,47 +104,71 @@ static const int level_max_dpb_mbs[][2] = { { 52, 184320 }, }; -static inline int decode_hrd_parameters(H264Context *h, SPS *sps) +static void remove_pps(H264ParamSets *s, int id) +{ + if (s->pps_list[id] && s->pps == (const PPS*)s->pps_list[id]->data) + s->pps = NULL; + av_buffer_unref(&s->pps_list[id]); +} + +static void remove_sps(H264ParamSets *s, int id) +{ + int i; + if (s->sps_list[id]) { + if (s->sps == (SPS*)s->sps_list[id]->data) + s->sps = NULL; + + /* drop all PPS that depend on this SPS */ + for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++) + if (s->pps_list[i] && ((PPS*)s->pps_list[i]->data)->sps_id == id) + remove_pps(s, i); + } + av_buffer_unref(&s->sps_list[id]); +} + +static inline int 
decode_hrd_parameters(GetBitContext *gb, AVCodecContext *avctx, + SPS *sps) { int cpb_count, i; - cpb_count = get_ue_golomb_31(&h->gb) + 1; + cpb_count = get_ue_golomb_31(gb) + 1; if (cpb_count > 32U) { - av_log(h->avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count); + av_log(avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count); return AVERROR_INVALIDDATA; } - get_bits(&h->gb, 4); /* bit_rate_scale */ - get_bits(&h->gb, 4); /* cpb_size_scale */ + get_bits(gb, 4); /* bit_rate_scale */ + get_bits(gb, 4); /* cpb_size_scale */ for (i = 0; i < cpb_count; i++) { - get_ue_golomb_long(&h->gb); /* bit_rate_value_minus1 */ - get_ue_golomb_long(&h->gb); /* cpb_size_value_minus1 */ - get_bits1(&h->gb); /* cbr_flag */ - } - sps->initial_cpb_removal_delay_length = get_bits(&h->gb, 5) + 1; - sps->cpb_removal_delay_length = get_bits(&h->gb, 5) + 1; - sps->dpb_output_delay_length = get_bits(&h->gb, 5) + 1; - sps->time_offset_length = get_bits(&h->gb, 5); + get_ue_golomb_long(gb); /* bit_rate_value_minus1 */ + get_ue_golomb_long(gb); /* cpb_size_value_minus1 */ + get_bits1(gb); /* cbr_flag */ + } + sps->initial_cpb_removal_delay_length = get_bits(gb, 5) + 1; + sps->cpb_removal_delay_length = get_bits(gb, 5) + 1; + sps->dpb_output_delay_length = get_bits(gb, 5) + 1; + sps->time_offset_length = get_bits(gb, 5); sps->cpb_cnt = cpb_count; return 0; } -static inline int decode_vui_parameters(H264Context *h, SPS *sps) +static inline int decode_vui_parameters(GetBitContext *gb, AVCodecContext *avctx, + SPS *sps) { int aspect_ratio_info_present_flag; unsigned int aspect_ratio_idc; - aspect_ratio_info_present_flag = get_bits1(&h->gb); + aspect_ratio_info_present_flag = get_bits1(gb); if (aspect_ratio_info_present_flag) { - aspect_ratio_idc = get_bits(&h->gb, 8); + aspect_ratio_idc = get_bits(gb, 8); if (aspect_ratio_idc == EXTENDED_SAR) { - sps->sar.num = get_bits(&h->gb, 16); - sps->sar.den = get_bits(&h->gb, 16); + sps->sar.num = get_bits(gb, 16); + sps->sar.den = get_bits(gb, 16); } else if (aspect_ratio_idc < FF_ARRAY_ELEMS(pixel_aspect)) { sps->sar = pixel_aspect[aspect_ratio_idc]; } else { - av_log(h->avctx, AV_LOG_ERROR, "illegal aspect ratio\n"); + av_log(avctx, AV_LOG_ERROR, "illegal aspect ratio\n"); return AVERROR_INVALIDDATA; } } else { @@ -152,19 +176,19 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps) sps->sar.den = 0; } - if (get_bits1(&h->gb)) /* overscan_info_present_flag */ - get_bits1(&h->gb); /* overscan_appropriate_flag */ + if (get_bits1(gb)) /* overscan_info_present_flag */ + get_bits1(gb); /* overscan_appropriate_flag */ - sps->video_signal_type_present_flag = get_bits1(&h->gb); + sps->video_signal_type_present_flag = get_bits1(gb); if (sps->video_signal_type_present_flag) { - get_bits(&h->gb, 3); /* video_format */ - sps->full_range = get_bits1(&h->gb); /* video_full_range_flag */ + get_bits(gb, 3); /* video_format */ + sps->full_range = get_bits1(gb); /* video_full_range_flag */ - sps->colour_description_present_flag = get_bits1(&h->gb); + sps->colour_description_present_flag = get_bits1(gb); if (sps->colour_description_present_flag) { - sps->color_primaries = get_bits(&h->gb, 8); /* colour_primaries */ - sps->color_trc = get_bits(&h->gb, 8); /* transfer_characteristics */ - sps->colorspace = get_bits(&h->gb, 8); /* matrix_coefficients */ + sps->color_primaries = get_bits(gb, 8); /* colour_primaries */ + sps->color_trc = get_bits(gb, 8); /* transfer_characteristics */ + sps->colorspace = get_bits(gb, 8); /* matrix_coefficients */ if (sps->color_primaries >= 
AVCOL_PRI_NB) sps->color_primaries = AVCOL_PRI_UNSPECIFIED; if (sps->color_trc >= AVCOL_TRC_NB) @@ -175,83 +199,83 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps) } /* chroma_location_info_present_flag */ - if (get_bits1(&h->gb)) { + if (get_bits1(gb)) { /* chroma_sample_location_type_top_field */ - h->avctx->chroma_sample_location = get_ue_golomb(&h->gb) + 1; - get_ue_golomb(&h->gb); /* chroma_sample_location_type_bottom_field */ + avctx->chroma_sample_location = get_ue_golomb(gb) + 1; + get_ue_golomb(gb); /* chroma_sample_location_type_bottom_field */ } - sps->timing_info_present_flag = get_bits1(&h->gb); + sps->timing_info_present_flag = get_bits1(gb); if (sps->timing_info_present_flag) { - sps->num_units_in_tick = get_bits_long(&h->gb, 32); - sps->time_scale = get_bits_long(&h->gb, 32); + sps->num_units_in_tick = get_bits_long(gb, 32); + sps->time_scale = get_bits_long(gb, 32); if (!sps->num_units_in_tick || !sps->time_scale) { - av_log(h->avctx, AV_LOG_ERROR, + av_log(avctx, AV_LOG_ERROR, "time_scale/num_units_in_tick invalid or unsupported (%"PRIu32"/%"PRIu32")\n", sps->time_scale, sps->num_units_in_tick); return AVERROR_INVALIDDATA; } - sps->fixed_frame_rate_flag = get_bits1(&h->gb); + sps->fixed_frame_rate_flag = get_bits1(gb); } - sps->nal_hrd_parameters_present_flag = get_bits1(&h->gb); + sps->nal_hrd_parameters_present_flag = get_bits1(gb); if (sps->nal_hrd_parameters_present_flag) - if (decode_hrd_parameters(h, sps) < 0) + if (decode_hrd_parameters(gb, avctx, sps) < 0) return AVERROR_INVALIDDATA; - sps->vcl_hrd_parameters_present_flag = get_bits1(&h->gb); + sps->vcl_hrd_parameters_present_flag = get_bits1(gb); if (sps->vcl_hrd_parameters_present_flag) - if (decode_hrd_parameters(h, sps) < 0) + if (decode_hrd_parameters(gb, avctx, sps) < 0) return AVERROR_INVALIDDATA; if (sps->nal_hrd_parameters_present_flag || sps->vcl_hrd_parameters_present_flag) - get_bits1(&h->gb); /* low_delay_hrd_flag */ - sps->pic_struct_present_flag = get_bits1(&h->gb); + get_bits1(gb); /* low_delay_hrd_flag */ + sps->pic_struct_present_flag = get_bits1(gb); - sps->bitstream_restriction_flag = get_bits1(&h->gb); + sps->bitstream_restriction_flag = get_bits1(gb); if (sps->bitstream_restriction_flag) { - get_bits1(&h->gb); /* motion_vectors_over_pic_boundaries_flag */ - get_ue_golomb(&h->gb); /* max_bytes_per_pic_denom */ - get_ue_golomb(&h->gb); /* max_bits_per_mb_denom */ - get_ue_golomb(&h->gb); /* log2_max_mv_length_horizontal */ - get_ue_golomb(&h->gb); /* log2_max_mv_length_vertical */ - sps->num_reorder_frames = get_ue_golomb(&h->gb); - get_ue_golomb(&h->gb); /*max_dec_frame_buffering*/ - - if (get_bits_left(&h->gb) < 0) { + get_bits1(gb); /* motion_vectors_over_pic_boundaries_flag */ + get_ue_golomb(gb); /* max_bytes_per_pic_denom */ + get_ue_golomb(gb); /* max_bits_per_mb_denom */ + get_ue_golomb(gb); /* log2_max_mv_length_horizontal */ + get_ue_golomb(gb); /* log2_max_mv_length_vertical */ + sps->num_reorder_frames = get_ue_golomb(gb); + get_ue_golomb(gb); /*max_dec_frame_buffering*/ + + if (get_bits_left(gb) < 0) { sps->num_reorder_frames = 0; sps->bitstream_restriction_flag = 0; } if (sps->num_reorder_frames > 16U /* max_dec_frame_buffering || max_dec_frame_buffering > 16 */) { - av_log(h->avctx, AV_LOG_ERROR, + av_log(avctx, AV_LOG_ERROR, "Clipping illegal num_reorder_frames %d\n", sps->num_reorder_frames); sps->num_reorder_frames = 16; return AVERROR_INVALIDDATA; } } - if (get_bits_left(&h->gb) < 0) { - av_log(h->avctx, AV_LOG_ERROR, - "Overread VUI by %d bits\n", 
-get_bits_left(&h->gb)); + if (get_bits_left(gb) < 0) { + av_log(avctx, AV_LOG_ERROR, + "Overread VUI by %d bits\n", -get_bits_left(gb)); return AVERROR_INVALIDDATA; } return 0; } -static void decode_scaling_list(H264Context *h, uint8_t *factors, int size, +static void decode_scaling_list(GetBitContext *gb, uint8_t *factors, int size, const uint8_t *jvt_list, const uint8_t *fallback_list) { int i, last = 8, next = 8; const uint8_t *scan = size == 16 ? ff_zigzag_scan : ff_zigzag_direct; - if (!get_bits1(&h->gb)) /* matrix not written, we use the predicted one */ + if (!get_bits1(gb)) /* matrix not written, we use the predicted one */ memcpy(factors, fallback_list, size * sizeof(uint8_t)); else for (i = 0; i < size; i++) { if (next) - next = (last + get_se_golomb(&h->gb)) & 0xff; + next = (last + get_se_golomb(gb)) & 0xff; if (!i && !next) { /* matrix not written, we use the preset one */ memcpy(factors, jvt_list, size * sizeof(uint8_t)); break; @@ -260,7 +284,7 @@ static void decode_scaling_list(H264Context *h, uint8_t *factors, int size, } } -static void decode_scaling_matrices(H264Context *h, SPS *sps, +static void decode_scaling_matrices(GetBitContext *gb, SPS *sps, PPS *pps, int is_sps, uint8_t(*scaling_matrix4)[16], uint8_t(*scaling_matrix8)[64]) @@ -272,54 +296,58 @@ static void decode_scaling_matrices(H264Context *h, SPS *sps, fallback_sps ? sps->scaling_matrix8[0] : default_scaling8[0], fallback_sps ? sps->scaling_matrix8[3] : default_scaling8[1] }; - if (get_bits1(&h->gb)) { + if (get_bits1(gb)) { sps->scaling_matrix_present |= is_sps; - decode_scaling_list(h, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]); // Intra, Y - decode_scaling_list(h, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr - decode_scaling_list(h, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb - decode_scaling_list(h, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]); // Inter, Y - decode_scaling_list(h, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr - decode_scaling_list(h, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb + decode_scaling_list(gb, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]); // Intra, Y + decode_scaling_list(gb, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr + decode_scaling_list(gb, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb + decode_scaling_list(gb, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]); // Inter, Y + decode_scaling_list(gb, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr + decode_scaling_list(gb, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb if (is_sps || pps->transform_8x8_mode) { - decode_scaling_list(h, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y + decode_scaling_list(gb, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y if (sps->chroma_format_idc == 3) { - decode_scaling_list(h, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr - decode_scaling_list(h, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb + decode_scaling_list(gb, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr + decode_scaling_list(gb, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb } - decode_scaling_list(h, scaling_matrix8[3], 64, default_scaling8[1], 
fallback[3]); // Inter, Y + decode_scaling_list(gb, scaling_matrix8[3], 64, default_scaling8[1], fallback[3]); // Inter, Y if (sps->chroma_format_idc == 3) { - decode_scaling_list(h, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr - decode_scaling_list(h, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb + decode_scaling_list(gb, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr + decode_scaling_list(gb, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb } } } } -int ff_h264_decode_seq_parameter_set(H264Context *h) +int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx, + H264ParamSets *ps) { + AVBufferRef *sps_buf; int profile_idc, level_idc, constraint_set_flags = 0; unsigned int sps_id; int i, log2_max_frame_num_minus4; SPS *sps; - profile_idc = get_bits(&h->gb, 8); - constraint_set_flags |= get_bits1(&h->gb) << 0; // constraint_set0_flag - constraint_set_flags |= get_bits1(&h->gb) << 1; // constraint_set1_flag - constraint_set_flags |= get_bits1(&h->gb) << 2; // constraint_set2_flag - constraint_set_flags |= get_bits1(&h->gb) << 3; // constraint_set3_flag - constraint_set_flags |= get_bits1(&h->gb) << 4; // constraint_set4_flag - constraint_set_flags |= get_bits1(&h->gb) << 5; // constraint_set5_flag - skip_bits(&h->gb, 2); // reserved_zero_2bits - level_idc = get_bits(&h->gb, 8); - sps_id = get_ue_golomb_31(&h->gb); + profile_idc = get_bits(gb, 8); + constraint_set_flags |= get_bits1(gb) << 0; // constraint_set0_flag + constraint_set_flags |= get_bits1(gb) << 1; // constraint_set1_flag + constraint_set_flags |= get_bits1(gb) << 2; // constraint_set2_flag + constraint_set_flags |= get_bits1(gb) << 3; // constraint_set3_flag + constraint_set_flags |= get_bits1(gb) << 4; // constraint_set4_flag + constraint_set_flags |= get_bits1(gb) << 5; // constraint_set5_flag + skip_bits(gb, 2); // reserved_zero_2bits + level_idc = get_bits(gb, 8); + sps_id = get_ue_golomb_31(gb); if (sps_id >= MAX_SPS_COUNT) { - av_log(h->avctx, AV_LOG_ERROR, "sps_id %u out of range\n", sps_id); + av_log(avctx, AV_LOG_ERROR, "sps_id %u out of range\n", sps_id); return AVERROR_INVALIDDATA; } - sps = av_mallocz(sizeof(SPS)); - if (!sps) + + sps_buf = av_buffer_allocz(sizeof(*sps)); + if (!sps_buf) return AVERROR(ENOMEM); + sps = (SPS*)sps_buf->data; sps->sps_id = sps_id; sps->time_offset_length = 24; @@ -342,23 +370,23 @@ int ff_h264_decode_seq_parameter_set(H264Context *h) sps->profile_idc == 128 || // Multiview High profile (MVC) sps->profile_idc == 138 || // Multiview Depth High profile (MVCD) sps->profile_idc == 144) { // old High444 profile - sps->chroma_format_idc = get_ue_golomb_31(&h->gb); + sps->chroma_format_idc = get_ue_golomb_31(gb); if (sps->chroma_format_idc > 3) { - avpriv_request_sample(h->avctx, "chroma_format_idc %u", + avpriv_request_sample(avctx, "chroma_format_idc %u", sps->chroma_format_idc); goto fail; } else if (sps->chroma_format_idc == 3) { - sps->residual_color_transform_flag = get_bits1(&h->gb); + sps->residual_color_transform_flag = get_bits1(gb); } - sps->bit_depth_luma = get_ue_golomb(&h->gb) + 8; - sps->bit_depth_chroma = get_ue_golomb(&h->gb) + 8; + sps->bit_depth_luma = get_ue_golomb(gb) + 8; + sps->bit_depth_chroma = get_ue_golomb(gb) + 8; if (sps->bit_depth_chroma != sps->bit_depth_luma) { - avpriv_request_sample(h->avctx, + avpriv_request_sample(avctx, "Different chroma and luma bit depth"); goto fail; } - sps->transform_bypass = 
get_bits1(&h->gb); - decode_scaling_matrices(h, sps, NULL, 1, + sps->transform_bypass = get_bits1(gb); + decode_scaling_matrices(gb, sps, NULL, 1, sps->scaling_matrix4, sps->scaling_matrix8); } else { sps->chroma_format_idc = 1; @@ -366,85 +394,85 @@ int ff_h264_decode_seq_parameter_set(H264Context *h) sps->bit_depth_chroma = 8; } - log2_max_frame_num_minus4 = get_ue_golomb(&h->gb); + log2_max_frame_num_minus4 = get_ue_golomb(gb); if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 || log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) { - av_log(h->avctx, AV_LOG_ERROR, + av_log(avctx, AV_LOG_ERROR, "log2_max_frame_num_minus4 out of range (0-12): %d\n", log2_max_frame_num_minus4); goto fail; } sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4; - sps->poc_type = get_ue_golomb_31(&h->gb); + sps->poc_type = get_ue_golomb_31(gb); if (sps->poc_type == 0) { // FIXME #define - sps->log2_max_poc_lsb = get_ue_golomb(&h->gb) + 4; + sps->log2_max_poc_lsb = get_ue_golomb(gb) + 4; } else if (sps->poc_type == 1) { // FIXME #define - sps->delta_pic_order_always_zero_flag = get_bits1(&h->gb); - sps->offset_for_non_ref_pic = get_se_golomb(&h->gb); - sps->offset_for_top_to_bottom_field = get_se_golomb(&h->gb); - sps->poc_cycle_length = get_ue_golomb(&h->gb); + sps->delta_pic_order_always_zero_flag = get_bits1(gb); + sps->offset_for_non_ref_pic = get_se_golomb(gb); + sps->offset_for_top_to_bottom_field = get_se_golomb(gb); + sps->poc_cycle_length = get_ue_golomb(gb); if ((unsigned)sps->poc_cycle_length >= FF_ARRAY_ELEMS(sps->offset_for_ref_frame)) { - av_log(h->avctx, AV_LOG_ERROR, + av_log(avctx, AV_LOG_ERROR, "poc_cycle_length overflow %d\n", sps->poc_cycle_length); goto fail; } for (i = 0; i < sps->poc_cycle_length; i++) - sps->offset_for_ref_frame[i] = get_se_golomb(&h->gb); + sps->offset_for_ref_frame[i] = get_se_golomb(gb); } else if (sps->poc_type != 2) { - av_log(h->avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type); + av_log(avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type); goto fail; } - sps->ref_frame_count = get_ue_golomb_31(&h->gb); + sps->ref_frame_count = get_ue_golomb_31(gb); if (sps->ref_frame_count > H264_MAX_PICTURE_COUNT - 2 || sps->ref_frame_count >= 32U) { - av_log(h->avctx, AV_LOG_ERROR, + av_log(avctx, AV_LOG_ERROR, "too many reference frames %d\n", sps->ref_frame_count); goto fail; } - sps->gaps_in_frame_num_allowed_flag = get_bits1(&h->gb); - sps->mb_width = get_ue_golomb(&h->gb) + 1; - sps->mb_height = get_ue_golomb(&h->gb) + 1; + sps->gaps_in_frame_num_allowed_flag = get_bits1(gb); + sps->mb_width = get_ue_golomb(gb) + 1; + sps->mb_height = get_ue_golomb(gb) + 1; if ((unsigned)sps->mb_width >= INT_MAX / 16 || (unsigned)sps->mb_height >= INT_MAX / 16 || av_image_check_size(16 * sps->mb_width, - 16 * sps->mb_height, 0, h->avctx)) { - av_log(h->avctx, AV_LOG_ERROR, "mb_width/height overflow\n"); + 16 * sps->mb_height, 0, avctx)) { + av_log(avctx, AV_LOG_ERROR, "mb_width/height overflow\n"); goto fail; } - sps->frame_mbs_only_flag = get_bits1(&h->gb); + sps->frame_mbs_only_flag = get_bits1(gb); if (!sps->frame_mbs_only_flag) - sps->mb_aff = get_bits1(&h->gb); + sps->mb_aff = get_bits1(gb); else sps->mb_aff = 0; - sps->direct_8x8_inference_flag = get_bits1(&h->gb); + sps->direct_8x8_inference_flag = get_bits1(gb); if (!sps->frame_mbs_only_flag && !sps->direct_8x8_inference_flag) { - av_log(h->avctx, AV_LOG_ERROR, + av_log(avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n"); goto fail; } #ifndef 
ALLOW_INTERLACE if (sps->mb_aff) - av_log(h->avctx, AV_LOG_ERROR, + av_log(avctx, AV_LOG_ERROR, "MBAFF support not included; enable it at compile-time.\n"); #endif - sps->crop = get_bits1(&h->gb); + sps->crop = get_bits1(gb); if (sps->crop) { - unsigned int crop_left = get_ue_golomb(&h->gb); - unsigned int crop_right = get_ue_golomb(&h->gb); - unsigned int crop_top = get_ue_golomb(&h->gb); - unsigned int crop_bottom = get_ue_golomb(&h->gb); + unsigned int crop_left = get_ue_golomb(gb); + unsigned int crop_right = get_ue_golomb(gb); + unsigned int crop_top = get_ue_golomb(gb); + unsigned int crop_bottom = get_ue_golomb(gb); - if (h->avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) { - av_log(h->avctx, AV_LOG_DEBUG, "discarding sps cropping, original " + if (avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) { + av_log(avctx, AV_LOG_DEBUG, "discarding sps cropping, original " "values are l:%d r:%d t:%d b:%d\n", crop_left, crop_right, crop_top, crop_bottom); @@ -460,9 +488,9 @@ int ff_h264_decode_seq_parameter_set(H264Context *h) int step_y = (2 - sps->frame_mbs_only_flag) << vsub; if (crop_left & (0x1F >> (sps->bit_depth_luma > 8)) && - !(h->avctx->flags & AV_CODEC_FLAG_UNALIGNED)) { + !(avctx->flags & AV_CODEC_FLAG_UNALIGNED)) { crop_left &= ~(0x1F >> (sps->bit_depth_luma > 8)); - av_log(h->avctx, AV_LOG_WARNING, + av_log(avctx, AV_LOG_WARNING, "Reducing left cropping to %d " "chroma samples to preserve alignment.\n", crop_left); @@ -474,8 +502,8 @@ int ff_h264_decode_seq_parameter_set(H264Context *h) INT_MAX / step_y <= crop_top || INT_MAX / step_y - crop_top <= crop_bottom || 16 * sps->mb_height <= step_y * (crop_top + crop_bottom)) { - av_log(h->avctx, AV_LOG_WARNING, "Invalid crop parameters\n"); - if (h->avctx->err_recognition & AV_EF_EXPLODE) + av_log(avctx, AV_LOG_WARNING, "Invalid crop parameters\n"); + if (avctx->err_recognition & AV_EF_EXPLODE) goto fail; crop_left = crop_right = crop_top = crop_bottom = 0; } @@ -493,10 +521,10 @@ int ff_h264_decode_seq_parameter_set(H264Context *h) sps->crop = 0; } - sps->vui_parameters_present_flag = get_bits1(&h->gb); + sps->vui_parameters_present_flag = get_bits1(gb); if (sps->vui_parameters_present_flag) { - int ret = decode_vui_parameters(h, sps); - if (ret < 0 && h->avctx->err_recognition & AV_EF_EXPLODE) + int ret = decode_vui_parameters(gb, avctx, sps); + if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) goto fail; } @@ -516,9 +544,9 @@ int ff_h264_decode_seq_parameter_set(H264Context *h) if (!sps->sar.den) sps->sar.den = 1; - if (h->avctx->debug & FF_DEBUG_PICT_INFO) { + if (avctx->debug & FF_DEBUG_PICT_INFO) { static const char csp[4][5] = { "Gray", "420", "422", "444" }; - av_log(h->avctx, AV_LOG_DEBUG, + av_log(avctx, AV_LOG_DEBUG, "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%u/%u/%u/%u %s %s %"PRId32"/%"PRId32"\n", sps_id, sps->profile_idc, sps->level_idc, sps->poc_type, @@ -533,19 +561,95 @@ int ff_h264_decode_seq_parameter_set(H264Context *h) sps->timing_info_present_flag ? sps->num_units_in_tick : 0, sps->timing_info_present_flag ? sps->time_scale : 0); } - sps->new = 1; - av_free(h->sps_buffers[sps_id]); - h->sps_buffers[sps_id] = sps; - h->sps = *sps; + /* check if this is a repeat of an already parsed SPS, then keep the + * original one. 
+ * otherwise drop all PPSes that depend on it */ + if (ps->sps_list[sps_id] && + !memcmp(ps->sps_list[sps_id]->data, sps_buf->data, sps_buf->size)) { + av_buffer_unref(&sps_buf); + } else { + remove_sps(ps, sps_id); + ps->sps_list[sps_id] = sps_buf; + } return 0; fail: - av_free(sps); + av_buffer_unref(&sps_buf); return AVERROR_INVALIDDATA; } +static void init_dequant8_coeff_table(PPS *pps, const SPS *sps) +{ + int i, j, q, x; + const int max_qp = 51 + 6 * (sps->bit_depth_luma - 8); + + for (i = 0; i < 6; i++) { + pps->dequant8_coeff[i] = pps->dequant8_buffer[i]; + for (j = 0; j < i; j++) + if (!memcmp(pps->scaling_matrix8[j], pps->scaling_matrix8[i], + 64 * sizeof(uint8_t))) { + pps->dequant8_coeff[i] = pps->dequant8_buffer[j]; + break; + } + if (j < i) + continue; + + for (q = 0; q < max_qp + 1; q++) { + int shift = ff_h264_quant_div6[q]; + int idx = ff_h264_quant_rem6[q]; + for (x = 0; x < 64; x++) + pps->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] = + ((uint32_t)ff_h264_dequant8_coeff_init[idx][ff_h264_dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] * + pps->scaling_matrix8[i][x]) << shift; + } + } +} + +static void init_dequant4_coeff_table(PPS *pps, const SPS *sps) +{ + int i, j, q, x; + const int max_qp = 51 + 6 * (sps->bit_depth_luma - 8); + for (i = 0; i < 6; i++) { + pps->dequant4_coeff[i] = pps->dequant4_buffer[i]; + for (j = 0; j < i; j++) + if (!memcmp(pps->scaling_matrix4[j], pps->scaling_matrix4[i], + 16 * sizeof(uint8_t))) { + pps->dequant4_coeff[i] = pps->dequant4_buffer[j]; + break; + } + if (j < i) + continue; + + for (q = 0; q < max_qp + 1; q++) { + int shift = ff_h264_quant_div6[q] + 2; + int idx = ff_h264_quant_rem6[q]; + for (x = 0; x < 16; x++) + pps->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] = + ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * + pps->scaling_matrix4[i][x]) << shift; + } + } +} + +static void init_dequant_tables(PPS *pps, const SPS *sps) +{ + int i, x; + init_dequant4_coeff_table(pps, sps); + if (pps->transform_8x8_mode) + init_dequant8_coeff_table(pps, sps); + if (sps->transform_bypass) { + for (i = 0; i < 6; i++) + for (x = 0; x < 16; x++) + pps->dequant4_coeff[i][0][x] = 1 << 6; + if (pps->transform_8x8_mode) + for (i = 0; i < 6; i++) + for (x = 0; x < 64; x++) + pps->dequant8_coeff[i][0][x] = 1 << 6; + } +} + static void build_qp_table(PPS *pps, int t, int index, const int depth) { int i; @@ -555,46 +659,50 @@ static void build_qp_table(PPS *pps, int t, int index, const int depth) ff_h264_chroma_qp[depth - 8][av_clip(i + index, 0, max_qp)]; } -int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length) +int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx, + H264ParamSets *ps, int bit_length) { - const SPS *sps; - unsigned int pps_id = get_ue_golomb(&h->gb); + AVBufferRef *pps_buf; + SPS *sps; + unsigned int pps_id = get_ue_golomb(gb); PPS *pps; int qp_bd_offset; int bits_left; int ret; if (pps_id >= MAX_PPS_COUNT) { - av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id); + av_log(avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id); return AVERROR_INVALIDDATA; } - pps = av_mallocz(sizeof(PPS)); - if (!pps) + pps_buf = av_buffer_allocz(sizeof(*pps)); + if (!pps_buf) return AVERROR(ENOMEM); - pps->sps_id = get_ue_golomb_31(&h->gb); + pps = (PPS*)pps_buf->data; + + pps->sps_id = get_ue_golomb_31(gb); if ((unsigned)pps->sps_id >= MAX_SPS_COUNT || - !h->sps_buffers[pps->sps_id]) { - av_log(h->avctx, AV_LOG_ERROR, "sps_id %u out of range\n", 
pps->sps_id); + !ps->sps_list[pps->sps_id]) { + av_log(avctx, AV_LOG_ERROR, "sps_id %u out of range\n", pps->sps_id); ret = AVERROR_INVALIDDATA; goto fail; } - sps = h->sps_buffers[pps->sps_id]; + sps = (SPS*)ps->sps_list[pps->sps_id]->data; if (sps->bit_depth_luma > 10) { - av_log(h->avctx, AV_LOG_ERROR, + av_log(avctx, AV_LOG_ERROR, "Unimplemented luma bit depth=%d (max=10)\n", sps->bit_depth_luma); ret = AVERROR_PATCHWELCOME; goto fail; } - pps->cabac = get_bits1(&h->gb); - pps->pic_order_present = get_bits1(&h->gb); - pps->slice_group_count = get_ue_golomb(&h->gb) + 1; + pps->cabac = get_bits1(gb); + pps->pic_order_present = get_bits1(gb); + pps->slice_group_count = get_ue_golomb(gb) + 1; if (pps->slice_group_count > 1) { - pps->mb_slice_group_map_type = get_ue_golomb(&h->gb); - av_log(h->avctx, AV_LOG_ERROR, "FMO not supported\n"); + pps->mb_slice_group_map_type = get_ue_golomb(gb); + av_log(avctx, AV_LOG_ERROR, "FMO not supported\n"); switch (pps->mb_slice_group_map_type) { case 0: #if 0 @@ -627,41 +735,39 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length) break; } } - pps->ref_count[0] = get_ue_golomb(&h->gb) + 1; - pps->ref_count[1] = get_ue_golomb(&h->gb) + 1; + pps->ref_count[0] = get_ue_golomb(gb) + 1; + pps->ref_count[1] = get_ue_golomb(gb) + 1; if (pps->ref_count[0] - 1 > 32 - 1 || pps->ref_count[1] - 1 > 32 - 1) { - av_log(h->avctx, AV_LOG_ERROR, "reference overflow (pps)\n"); + av_log(avctx, AV_LOG_ERROR, "reference overflow (pps)\n"); ret = AVERROR_INVALIDDATA; goto fail; } qp_bd_offset = 6 * (sps->bit_depth_luma - 8); - pps->weighted_pred = get_bits1(&h->gb); - pps->weighted_bipred_idc = get_bits(&h->gb, 2); - pps->init_qp = get_se_golomb(&h->gb) + 26 + qp_bd_offset; - pps->init_qs = get_se_golomb(&h->gb) + 26 + qp_bd_offset; - pps->chroma_qp_index_offset[0] = get_se_golomb(&h->gb); - pps->deblocking_filter_parameters_present = get_bits1(&h->gb); - pps->constrained_intra_pred = get_bits1(&h->gb); - pps->redundant_pic_cnt_present = get_bits1(&h->gb); + pps->weighted_pred = get_bits1(gb); + pps->weighted_bipred_idc = get_bits(gb, 2); + pps->init_qp = get_se_golomb(gb) + 26 + qp_bd_offset; + pps->init_qs = get_se_golomb(gb) + 26 + qp_bd_offset; + pps->chroma_qp_index_offset[0] = get_se_golomb(gb); + pps->deblocking_filter_parameters_present = get_bits1(gb); + pps->constrained_intra_pred = get_bits1(gb); + pps->redundant_pic_cnt_present = get_bits1(gb); pps->transform_8x8_mode = 0; - // contents of sps/pps can change even if id doesn't, so reinit - h->dequant_coeff_pps = -1; - memcpy(pps->scaling_matrix4, h->sps_buffers[pps->sps_id]->scaling_matrix4, + memcpy(pps->scaling_matrix4, sps->scaling_matrix4, sizeof(pps->scaling_matrix4)); - memcpy(pps->scaling_matrix8, h->sps_buffers[pps->sps_id]->scaling_matrix8, + memcpy(pps->scaling_matrix8, sps->scaling_matrix8, sizeof(pps->scaling_matrix8)); - bits_left = bit_length - get_bits_count(&h->gb); + bits_left = bit_length - get_bits_count(gb); if (bits_left && (bits_left > 8 || - show_bits(&h->gb, bits_left) != 1 << (bits_left - 1))) { - pps->transform_8x8_mode = get_bits1(&h->gb); - decode_scaling_matrices(h, h->sps_buffers[pps->sps_id], pps, 0, + show_bits(gb, bits_left) != 1 << (bits_left - 1))) { + pps->transform_8x8_mode = get_bits1(gb); + decode_scaling_matrices(gb, sps, pps, 0, pps->scaling_matrix4, pps->scaling_matrix8); // second_chroma_qp_index_offset - pps->chroma_qp_index_offset[1] = get_se_golomb(&h->gb); + pps->chroma_qp_index_offset[1] = get_se_golomb(gb); } else { 
pps->chroma_qp_index_offset[1] = pps->chroma_qp_index_offset[0]; } @@ -670,11 +776,14 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length) sps->bit_depth_luma); build_qp_table(pps, 1, pps->chroma_qp_index_offset[1], sps->bit_depth_luma); + + init_dequant_tables(pps, sps); + if (pps->chroma_qp_index_offset[0] != pps->chroma_qp_index_offset[1]) pps->chroma_qp_diff = 1; - if (h->avctx->debug & FF_DEBUG_PICT_INFO) { - av_log(h->avctx, AV_LOG_DEBUG, + if (avctx->debug & FF_DEBUG_PICT_INFO) { + av_log(avctx, AV_LOG_DEBUG, "pps:%u sps:%u %s slice_groups:%d ref:%u/%u %s qp:%d/%d/%d/%d %s %s %s %s\n", pps_id, pps->sps_id, pps->cabac ? "CABAC" : "CAVLC", @@ -688,11 +797,12 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length) pps->transform_8x8_mode ? "8x8DCT" : ""); } - av_free(h->pps_buffers[pps_id]); - h->pps_buffers[pps_id] = pps; + remove_pps(ps, pps_id); + ps->pps_list[pps_id] = pps_buf; + return 0; fail: - av_free(pps); + av_buffer_unref(&pps_buf); return ret; } diff --git a/libavcodec/h264_refs.c b/libavcodec/h264_refs.c index 2102f88b58..e0a9334806 100644 --- a/libavcodec/h264_refs.c +++ b/libavcodec/h264_refs.c @@ -526,10 +526,10 @@ int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice) MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp; int mmco_index = 0, i = 0; - assert(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count); + assert(h->long_ref_count + h->short_ref_count <= h->ps.sps->ref_frame_count); if (h->short_ref_count && - h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count && + h->long_ref_count + h->short_ref_count == h->ps.sps->ref_frame_count && !(FIELD_PICTURE(h) && !h->first_field && h->cur_pic_ptr->reference)) { mmco[0].opcode = MMCO_SHORT2UNUSED; mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num; @@ -698,7 +698,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count) } if (h->long_ref_count + h->short_ref_count - - (h->short_ref[0] == h->cur_pic_ptr) > h->sps.ref_frame_count) { + (h->short_ref[0] == h->cur_pic_ptr) > h->ps.sps->ref_frame_count) { /* We have too many reference frames, probably due to corrupted * stream. Need to discard one frame. 
Prevents overrun of the @@ -707,7 +707,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count) av_log(h->avctx, AV_LOG_ERROR, "number of reference frames (%d+%d) exceeds max (%d; probably " "corrupt input), discarding one\n", - h->long_ref_count, h->short_ref_count, h->sps.ref_frame_count); + h->long_ref_count, h->short_ref_count, h->ps.sps->ref_frame_count); err = AVERROR_INVALIDDATA; if (h->long_ref_count && !h->short_ref_count) { diff --git a/libavcodec/h264_sei.c b/libavcodec/h264_sei.c index 1fb1fc5db4..aedb295f4f 100644 --- a/libavcodec/h264_sei.c +++ b/libavcodec/h264_sei.c @@ -50,14 +50,19 @@ void ff_h264_reset_sei(H264Context *h) static int decode_picture_timing(H264Context *h) { - if (h->sps.nal_hrd_parameters_present_flag || - h->sps.vcl_hrd_parameters_present_flag) { + const SPS *sps = h->ps.sps; + + if (!sps) + return AVERROR_INVALIDDATA; + + if (sps->nal_hrd_parameters_present_flag || + sps->vcl_hrd_parameters_present_flag) { h->sei_cpb_removal_delay = get_bits(&h->gb, - h->sps.cpb_removal_delay_length); + sps->cpb_removal_delay_length); h->sei_dpb_output_delay = get_bits(&h->gb, - h->sps.dpb_output_delay_length); + sps->dpb_output_delay_length); } - if (h->sps.pic_struct_present_flag) { + if (sps->pic_struct_present_flag) { unsigned int i, num_clock_ts; h->sei_pic_struct = get_bits(&h->gb, 4); @@ -93,9 +98,9 @@ static int decode_picture_timing(H264Context *h) } } } - if (h->sps.time_offset_length > 0) + if (sps->time_offset_length > 0) skip_bits(&h->gb, - h->sps.time_offset_length); /* time_offset */ + sps->time_offset_length); /* time_offset */ } } @@ -259,12 +264,12 @@ static int decode_buffering_period(H264Context *h) SPS *sps; sps_id = get_ue_golomb_31(&h->gb); - if (sps_id > 31 || !h->sps_buffers[sps_id]) { + if (sps_id > 31 || !h->ps.sps_list[sps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing SPS %d referenced in buffering period\n", sps_id); return AVERROR_INVALIDDATA; } - sps = h->sps_buffers[sps_id]; + sps = (SPS*)h->ps.sps_list[sps_id]->data; // NOTE: This is really so duplicated in the standard... 
See H.264, D.1.1 if (sps->nal_hrd_parameters_present_flag) { diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c index 4b9c00c76b..e2617e2ad4 100644 --- a/libavcodec/h264_slice.c +++ b/libavcodec/h264_slice.c @@ -282,76 +282,6 @@ static int initialize_cur_frame(H264Context *h) return 0; } -static void init_dequant8_coeff_table(H264Context *h) -{ - int i, j, q, x; - const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8); - - for (i = 0; i < 6; i++) { - h->dequant8_coeff[i] = h->dequant8_buffer[i]; - for (j = 0; j < i; j++) - if (!memcmp(h->pps.scaling_matrix8[j], h->pps.scaling_matrix8[i], - 64 * sizeof(uint8_t))) { - h->dequant8_coeff[i] = h->dequant8_buffer[j]; - break; - } - if (j < i) - continue; - - for (q = 0; q < max_qp + 1; q++) { - int shift = ff_h264_quant_div6[q]; - int idx = ff_h264_quant_rem6[q]; - for (x = 0; x < 64; x++) - h->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] = - ((uint32_t)ff_h264_dequant8_coeff_init[idx][ff_h264_dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] * - h->pps.scaling_matrix8[i][x]) << shift; - } - } -} - -static void init_dequant4_coeff_table(H264Context *h) -{ - int i, j, q, x; - const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8); - for (i = 0; i < 6; i++) { - h->dequant4_coeff[i] = h->dequant4_buffer[i]; - for (j = 0; j < i; j++) - if (!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i], - 16 * sizeof(uint8_t))) { - h->dequant4_coeff[i] = h->dequant4_buffer[j]; - break; - } - if (j < i) - continue; - - for (q = 0; q < max_qp + 1; q++) { - int shift = ff_h264_quant_div6[q] + 2; - int idx = ff_h264_quant_rem6[q]; - for (x = 0; x < 16; x++) - h->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] = - ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * - h->pps.scaling_matrix4[i][x]) << shift; - } - } -} - -void ff_h264_init_dequant_tables(H264Context *h) -{ - int i, x; - init_dequant4_coeff_table(h); - if (h->pps.transform_8x8_mode) - init_dequant8_coeff_table(h); - if (h->sps.transform_bypass) { - for (i = 0; i < 6; i++) - for (x = 0; x < 16; x++) - h->dequant4_coeff[i][0][x] = 1 << 6; - if (h->pps.transform_8x8_mode) - for (i = 0; i < 6; i++) - for (x = 0; x < 64; x++) - h->dequant8_coeff[i][0][x] = 1 << 6; - } -} - #define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size)))) #define REBASE_PICTURE(pic, new_ctx, old_ctx) \ @@ -374,26 +304,6 @@ static void copy_picture_range(H264Picture **to, H264Picture **from, int count, } } -static int copy_parameter_set(void **to, void **from, int count, int size) -{ - int i; - - for (i = 0; i < count; i++) { - if (to[i] && !from[i]) { - av_freep(&to[i]); - } else if (from[i] && !to[i]) { - to[i] = av_malloc(size); - if (!to[i]) - return AVERROR(ENOMEM); - } - - if (from[i]) - memcpy(to[i], from[i], size); - } - - return 0; -} - #define copy_fields(to, from, start_field, end_field) \ memcpy(&to->start_field, &from->start_field, \ (char *)&to->end_field - (char *)&to->start_field) @@ -411,28 +321,40 @@ int ff_h264_update_thread_context(AVCodecContext *dst, if (dst == src || !h1->context_initialized) return 0; + if (!h1->ps.sps) + return AVERROR_INVALIDDATA; + if (inited && (h->width != h1->width || h->height != h1->height || h->mb_width != h1->mb_width || h->mb_height != h1->mb_height || - h->sps.bit_depth_luma != h1->sps.bit_depth_luma || - h->sps.chroma_format_idc != h1->sps.chroma_format_idc || - h->sps.colorspace != h1->sps.colorspace)) { + !h->ps.sps || + h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma || + h->ps.sps->chroma_format_idc != 
h1->ps.sps->chroma_format_idc || + h->ps.sps->colorspace != h1->ps.sps->colorspace)) { need_reinit = 1; } // SPS/PPS - if ((ret = copy_parameter_set((void **)h->sps_buffers, - (void **)h1->sps_buffers, - MAX_SPS_COUNT, sizeof(SPS))) < 0) - return ret; - h->sps = h1->sps; - if ((ret = copy_parameter_set((void **)h->pps_buffers, - (void **)h1->pps_buffers, - MAX_PPS_COUNT, sizeof(PPS))) < 0) - return ret; - h->pps = h1->pps; + for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) { + av_buffer_unref(&h->ps.sps_list[i]); + if (h1->ps.sps_list[i]) { + h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]); + if (!h->ps.sps_list[i]) + return AVERROR(ENOMEM); + } + } + for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) { + av_buffer_unref(&h->ps.pps_list[i]); + if (h1->ps.pps_list[i]) { + h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]); + if (!h->ps.pps_list[i]) + return AVERROR(ENOMEM); + } + } + + h->ps.sps = h1->ps.sps; if (need_reinit || !inited) { h->width = h1->width; @@ -486,20 +408,6 @@ int ff_h264_update_thread_context(AVCodecContext *dst, h->is_avc = h1->is_avc; h->nal_length_size = h1->nal_length_size; - // Dequantization matrices - // FIXME these are big - can they be only copied when PPS changes? - copy_fields(h, h1, dequant4_buffer, dequant4_coeff); - - for (i = 0; i < 6; i++) - h->dequant4_coeff[i] = h->dequant4_buffer[0] + - (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]); - - for (i = 0; i < 6; i++) - h->dequant8_coeff[i] = h->dequant8_buffer[0] + - (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]); - - h->dequant_coeff_pps = h1->dequant_coeff_pps; - // POC timing copy_fields(h, h1, poc_lsb, current_slice); @@ -766,7 +674,7 @@ static void init_scan_tables(H264Context *h) h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]); #undef TRANSPOSE } - if (h->sps.transform_bypass) { // FIXME same ugly + if (h->ps.sps->transform_bypass) { // FIXME same ugly h->zigzag_scan_q0 = ff_zigzag_scan; h->zigzag_scan8x8_q0 = ff_zigzag_direct; h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc; @@ -793,7 +701,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h) enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts; const enum AVPixelFormat *choices = pix_fmts; - switch (h->sps.bit_depth_luma) { + switch (h->ps.sps->bit_depth_luma) { case 9: if (CHROMA444(h)) { if (h->avctx->colorspace == AVCOL_SPC_RGB) { @@ -856,7 +764,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h) break; default: av_log(h->avctx, AV_LOG_ERROR, - "Unsupported bit depth %d\n", h->sps.bit_depth_luma); + "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma); return AVERROR_INVALIDDATA; } @@ -868,8 +776,9 @@ static enum AVPixelFormat get_pixel_format(H264Context *h) /* export coded and cropped frame dimensions to AVCodecContext */ static int init_dimensions(H264Context *h) { - int width = h->width - (h->sps.crop_right + h->sps.crop_left); - int height = h->height - (h->sps.crop_top + h->sps.crop_bottom); + SPS *sps = h->ps.sps; + int width = h->width - (sps->crop_right + sps->crop_left); + int height = h->height - (sps->crop_top + sps->crop_bottom); /* handle container cropping */ if (FFALIGN(h->avctx->width, 16) == FFALIGN(width, 16) && @@ -885,11 +794,11 @@ static int init_dimensions(H264Context *h) return AVERROR_INVALIDDATA; av_log(h->avctx, AV_LOG_WARNING, "Ignoring cropping information.\n"); - h->sps.crop_bottom = - h->sps.crop_top = - h->sps.crop_right = - h->sps.crop_left = - h->sps.crop = 0; + sps->crop_bottom = + sps->crop_top = + sps->crop_right = + sps->crop_left = + 
sps->crop = 0; width = h->width; height = h->height; @@ -905,21 +814,22 @@ static int init_dimensions(H264Context *h) static int h264_slice_header_init(H264Context *h) { + const SPS *sps = h->ps.sps; int nb_slices = (HAVE_THREADS && h->avctx->active_thread_type & FF_THREAD_SLICE) ? h->avctx->thread_count : 1; int i, ret; - ff_set_sar(h->avctx, h->sps.sar); + ff_set_sar(h->avctx, sps->sar); av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt, &h->chroma_x_shift, &h->chroma_y_shift); - if (h->sps.timing_info_present_flag) { - int64_t den = h->sps.time_scale; + if (sps->timing_info_present_flag) { + int64_t den = sps->time_scale; if (h->x264_build < 44U) den *= 2; av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num, - h->sps.num_units_in_tick, den, 1 << 30); + sps->num_units_in_tick, den, 1 << 30); } ff_h264_free_tables(h); @@ -934,24 +844,24 @@ static int h264_slice_header_init(H264Context *h) return ret; } - if (h->sps.bit_depth_luma < 8 || h->sps.bit_depth_luma > 10) { + if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 10) { av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n", - h->sps.bit_depth_luma); + sps->bit_depth_luma); return AVERROR_INVALIDDATA; } - h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma; - h->pixel_shift = h->sps.bit_depth_luma > 8; - h->chroma_format_idc = h->sps.chroma_format_idc; - h->bit_depth_luma = h->sps.bit_depth_luma; + h->avctx->bits_per_raw_sample = sps->bit_depth_luma; + h->pixel_shift = sps->bit_depth_luma > 8; + h->chroma_format_idc = sps->chroma_format_idc; + h->bit_depth_luma = sps->bit_depth_luma; - ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma, - h->sps.chroma_format_idc); - ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma); - ff_h264qpel_init(&h->h264qpel, h->sps.bit_depth_luma); - ff_h264_pred_init(&h->hpc, h->avctx->codec_id, h->sps.bit_depth_luma, - h->sps.chroma_format_idc); - ff_videodsp_init(&h->vdsp, h->sps.bit_depth_luma); + ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma, + sps->chroma_format_idc); + ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma); + ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma); + ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma, + sps->chroma_format_idc); + ff_videodsp_init(&h->vdsp, sps->bit_depth_luma); if (nb_slices > H264_MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) { int max_slices; @@ -1002,6 +912,8 @@ static int h264_slice_header_init(H264Context *h) */ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) { + const SPS *sps; + const PPS *pps; unsigned int first_mb_in_slice; unsigned int pps_id; int ret; @@ -1064,39 +976,36 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id); return AVERROR_INVALIDDATA; } - if (!h->pps_buffers[pps_id]) { + if (!h->ps.pps_list[pps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", pps_id); return AVERROR_INVALIDDATA; } if (!h->setup_finished) { - h->pps = *h->pps_buffers[pps_id]; - } else if (h->dequant_coeff_pps != pps_id) { + h->ps.pps = (const PPS*)h->ps.pps_list[pps_id]->data; + } else if (h->ps.pps != (const PPS*)h->ps.pps_list[pps_id]->data) { av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n"); return AVERROR_INVALIDDATA; } - if (!h->sps_buffers[h->pps.sps_id]) { + if (!h->ps.sps_list[h->ps.pps->sps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", - h->pps.sps_id); + h->ps.pps->sps_id); return AVERROR_INVALIDDATA; } - if 
(h->pps.sps_id != h->sps.sps_id || - h->sps_buffers[h->pps.sps_id]->new) { - h->sps_buffers[h->pps.sps_id]->new = 0; - - h->sps = *h->sps_buffers[h->pps.sps_id]; + if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) { + h->ps.sps = (SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data; - if (h->bit_depth_luma != h->sps.bit_depth_luma || - h->chroma_format_idc != h->sps.chroma_format_idc) + if (h->bit_depth_luma != h->ps.sps->bit_depth_luma || + h->chroma_format_idc != h->ps.sps->chroma_format_idc) needs_reinit = 1; if (h->flags & AV_CODEC_FLAG_LOW_DELAY || - (h->sps.bitstream_restriction_flag && - !h->sps.num_reorder_frames)) { + (h->ps.sps->bitstream_restriction_flag && + !h->ps.sps->num_reorder_frames)) { if (h->avctx->has_b_frames > 1 || h->delayed_pic[0]) av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. " "Reenabling low delay requires a codec flush.\n"); @@ -1109,23 +1018,26 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) } + pps = h->ps.pps; + sps = h->ps.sps; + if (!h->setup_finished) { - h->avctx->profile = ff_h264_get_profile(&h->sps); - h->avctx->level = h->sps.level_idc; - h->avctx->refs = h->sps.ref_frame_count; + h->avctx->profile = ff_h264_get_profile(sps); + h->avctx->level = sps->level_idc; + h->avctx->refs = sps->ref_frame_count; - if (h->mb_width != h->sps.mb_width || - h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)) + if (h->mb_width != sps->mb_width || + h->mb_height != sps->mb_height * (2 - sps->frame_mbs_only_flag)) needs_reinit = 1; - h->mb_width = h->sps.mb_width; - h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag); + h->mb_width = sps->mb_width; + h->mb_height = sps->mb_height * (2 - sps->frame_mbs_only_flag); h->mb_num = h->mb_width * h->mb_height; h->mb_stride = h->mb_width + 1; h->b_stride = h->mb_width * 4; - h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p + h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p h->width = 16 * h->mb_width; h->height = 16 * h->mb_height; @@ -1134,15 +1046,15 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) if (ret < 0) return ret; - if (h->sps.video_signal_type_present_flag) { - h->avctx->color_range = h->sps.full_range ? AVCOL_RANGE_JPEG + if (sps->video_signal_type_present_flag) { + h->avctx->color_range = sps->full_range ? 
AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; - if (h->sps.colour_description_present_flag) { - if (h->avctx->colorspace != h->sps.colorspace) + if (sps->colour_description_present_flag) { + if (h->avctx->colorspace != sps->colorspace) needs_reinit = 1; - h->avctx->color_primaries = h->sps.color_primaries; - h->avctx->color_trc = h->sps.color_trc; - h->avctx->colorspace = h->sps.colorspace; + h->avctx->color_primaries = sps->color_primaries; + h->avctx->color_trc = sps->color_trc; + h->avctx->colorspace = sps->colorspace; } } } @@ -1192,12 +1104,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) } } - if (sl == h->slice_ctx && h->dequant_coeff_pps != pps_id) { - h->dequant_coeff_pps = pps_id; - ff_h264_init_dequant_tables(h); - } - - frame_num = get_bits(&sl->gb, h->sps.log2_max_frame_num); + frame_num = get_bits(&sl->gb, sps->log2_max_frame_num); if (!h->setup_finished) h->frame_num = frame_num; @@ -1207,7 +1114,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) last_pic_droppable = h->droppable; droppable = h->nal_ref_idc == 0; - if (h->sps.frame_mbs_only_flag) { + if (sps->frame_mbs_only_flag) { picture_structure = PICT_FRAME; } else { field_pic_flag = get_bits1(&sl->gb); @@ -1216,7 +1123,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) picture_structure = PICT_TOP_FIELD + bottom_field_flag; } else { picture_structure = PICT_FRAME; - mb_aff_frame = h->sps.mb_aff; + mb_aff_frame = sps->mb_aff; } } if (!h->setup_finished) { @@ -1244,13 +1151,13 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) * frames just to throw them away */ if (h->frame_num != h->prev_frame_num) { int unwrap_prev_frame_num = h->prev_frame_num; - int max_frame_num = 1 << h->sps.log2_max_frame_num; + int max_frame_num = 1 << sps->log2_max_frame_num; if (unwrap_prev_frame_num > h->frame_num) unwrap_prev_frame_num -= max_frame_num; - if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) { - unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1; + if ((h->frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) { + unwrap_prev_frame_num = (h->frame_num - sps->ref_frame_count) - 1; if (unwrap_prev_frame_num < 0) unwrap_prev_frame_num += max_frame_num; @@ -1310,7 +1217,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) } while (h->frame_num != h->prev_frame_num && - h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) { + h->frame_num != (h->prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) { H264Picture *prev = h->short_ref_count ? 
h->short_ref[0] : NULL; av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num); @@ -1321,7 +1228,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) } h->prev_frame_num++; - h->prev_frame_num %= 1 << h->sps.log2_max_frame_num; + h->prev_frame_num %= 1 << sps->log2_max_frame_num; h->cur_pic_ptr->frame_num = h->prev_frame_num; ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); @@ -1412,35 +1319,35 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) if (h->picture_structure == PICT_FRAME) { h->curr_pic_num = h->frame_num; - h->max_pic_num = 1 << h->sps.log2_max_frame_num; + h->max_pic_num = 1 << sps->log2_max_frame_num; } else { h->curr_pic_num = 2 * h->frame_num + 1; - h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1); + h->max_pic_num = 1 << (sps->log2_max_frame_num + 1); } if (h->nal_unit_type == NAL_IDR_SLICE) get_ue_golomb(&sl->gb); /* idr_pic_id */ - if (h->sps.poc_type == 0) { - int poc_lsb = get_bits(&sl->gb, h->sps.log2_max_poc_lsb); + if (sps->poc_type == 0) { + int poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb); if (!h->setup_finished) h->poc_lsb = poc_lsb; - if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) { + if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) { int delta_poc_bottom = get_se_golomb(&sl->gb); if (!h->setup_finished) h->delta_poc_bottom = delta_poc_bottom; } } - if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) { + if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) { int delta_poc = get_se_golomb(&sl->gb); if (!h->setup_finished) h->delta_poc[0] = delta_poc; - if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) { + if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) { delta_poc = get_se_golomb(&sl->gb); if (!h->setup_finished) @@ -1451,14 +1358,14 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) if (!h->setup_finished) ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc); - if (h->pps.redundant_pic_cnt_present) + if (pps->redundant_pic_cnt_present) sl->redundant_pic_count = get_ue_golomb(&sl->gb); if (sl->slice_type_nos == AV_PICTURE_TYPE_B) sl->direct_spatial_mv_pred = get_bits1(&sl->gb); ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count, - &sl->gb, &h->pps, sl->slice_type_nos, + &sl->gb, pps, sl->slice_type_nos, h->picture_structure); if (ret < 0) return ret; @@ -1471,12 +1378,12 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) } } - if ((h->pps.weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) || - (h->pps.weighted_bipred_idc == 1 && + if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) || + (pps->weighted_bipred_idc == 1 && sl->slice_type_nos == AV_PICTURE_TYPE_B)) - ff_h264_pred_weight_table(&sl->gb, &h->sps, sl->ref_count, + ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count, sl->slice_type_nos, &sl->pwt); - else if (h->pps.weighted_bipred_idc == 2 && + else if (pps->weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, sl, -1); } else { @@ -1503,7 +1410,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) if (FRAME_MBAFF(h)) { ff_h264_fill_mbaff_ref_list(h, sl); - if (h->pps.weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) { + if (pps->weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) { 
implicit_weight_table(h, sl, 0); implicit_weight_table(h, sl, 1); } @@ -1513,7 +1420,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) ff_h264_direct_dist_scale_factor(h, sl); ff_h264_direct_ref_list_init(h, sl); - if (sl->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) { + if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) { tmp = get_ue_golomb_31(&sl->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp); @@ -1523,8 +1430,8 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) } sl->last_qscale_diff = 0; - tmp = h->pps.init_qp + get_se_golomb(&sl->gb); - if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) { + tmp = pps->init_qp + get_se_golomb(&sl->gb); + if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) { av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); return AVERROR_INVALIDDATA; } @@ -1541,7 +1448,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) sl->deblocking_filter = 1; sl->slice_alpha_c0_offset = 0; sl->slice_beta_offset = 0; - if (h->pps.deblocking_filter_parameters_present) { + if (pps->deblocking_filter_parameters_present) { tmp = get_ue_golomb_31(&sl->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, @@ -1598,9 +1505,9 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) sl->qp_thresh = 15 - FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) - FFMAX3(0, - h->pps.chroma_qp_index_offset[0], - h->pps.chroma_qp_index_offset[1]) + - 6 * (h->sps.bit_depth_luma - 8); + pps->chroma_qp_index_offset[0], + pps->chroma_qp_index_offset[1]) + + 6 * (sps->bit_depth_luma - 8); sl->slice_num = ++h->current_slice; if (sl->slice_num >= MAX_SLICES) { @@ -1870,7 +1777,7 @@ static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb /* CAVLC 8x8dct requires NNZ values for residual decoding that differ * from what the loop filter needs */ - if (!CABAC(h) && h->pps.transform_8x8_mode) { + if (!CABAC(h) && h->ps.pps->transform_8x8_mode) { if (IS_8x8DCT(top_type)) { nnz_cache[4 + 8 * 0] = nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12; @@ -2060,7 +1967,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) avctx->codec_id != AV_CODEC_ID_H264 || (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY)); - if (h->pps.cabac) { + if (h->ps.pps->cabac) { /* realign */ align_get_bits(&sl->gb); diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c index 2e2626edc5..9c17ac1374 100644 --- a/libavcodec/vaapi_h264.c +++ b/libavcodec/vaapi_h264.c @@ -227,6 +227,8 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx, { H264Context * const h = avctx->priv_data; struct vaapi_context * const vactx = avctx->hwaccel_context; + const PPS *pps = h->ps.pps; + const SPS *sps = h->ps.sps; VAPictureParameterBufferH264 *pic_param; VAIQMatrixBufferH264 *iq_matrix; @@ -243,38 +245,38 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx, return -1; pic_param->picture_width_in_mbs_minus1 = h->mb_width - 1; pic_param->picture_height_in_mbs_minus1 = h->mb_height - 1; - pic_param->bit_depth_luma_minus8 = h->sps.bit_depth_luma - 8; - pic_param->bit_depth_chroma_minus8 = h->sps.bit_depth_chroma - 8; - pic_param->num_ref_frames = h->sps.ref_frame_count; + pic_param->bit_depth_luma_minus8 = sps->bit_depth_luma - 8; + pic_param->bit_depth_chroma_minus8 = sps->bit_depth_chroma - 8; + pic_param->num_ref_frames = sps->ref_frame_count; pic_param->seq_fields.value = 0; /* reset all bits */ - pic_param->seq_fields.bits.chroma_format_idc = 
h->sps.chroma_format_idc; - pic_param->seq_fields.bits.residual_colour_transform_flag = h->sps.residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */ - pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = h->sps.gaps_in_frame_num_allowed_flag; - pic_param->seq_fields.bits.frame_mbs_only_flag = h->sps.frame_mbs_only_flag; - pic_param->seq_fields.bits.mb_adaptive_frame_field_flag = h->sps.mb_aff; - pic_param->seq_fields.bits.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag; - pic_param->seq_fields.bits.MinLumaBiPredSize8x8 = h->sps.level_idc >= 31; /* A.3.3.2 */ - pic_param->seq_fields.bits.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4; - pic_param->seq_fields.bits.pic_order_cnt_type = h->sps.poc_type; - pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4; - pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag; - pic_param->num_slice_groups_minus1 = h->pps.slice_group_count - 1; - pic_param->slice_group_map_type = h->pps.mb_slice_group_map_type; + pic_param->seq_fields.bits.chroma_format_idc = sps->chroma_format_idc; + pic_param->seq_fields.bits.residual_colour_transform_flag = sps->residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */ + pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = sps->gaps_in_frame_num_allowed_flag; + pic_param->seq_fields.bits.frame_mbs_only_flag = sps->frame_mbs_only_flag; + pic_param->seq_fields.bits.mb_adaptive_frame_field_flag = sps->mb_aff; + pic_param->seq_fields.bits.direct_8x8_inference_flag = sps->direct_8x8_inference_flag; + pic_param->seq_fields.bits.MinLumaBiPredSize8x8 = sps->level_idc >= 31; /* A.3.3.2 */ + pic_param->seq_fields.bits.log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4; + pic_param->seq_fields.bits.pic_order_cnt_type = sps->poc_type; + pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4; + pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag; + pic_param->num_slice_groups_minus1 = pps->slice_group_count - 1; + pic_param->slice_group_map_type = pps->mb_slice_group_map_type; pic_param->slice_group_change_rate_minus1 = 0; /* XXX: unimplemented in Libav */ - pic_param->pic_init_qp_minus26 = h->pps.init_qp - 26; - pic_param->pic_init_qs_minus26 = h->pps.init_qs - 26; - pic_param->chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0]; - pic_param->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1]; + pic_param->pic_init_qp_minus26 = pps->init_qp - 26; + pic_param->pic_init_qs_minus26 = pps->init_qs - 26; + pic_param->chroma_qp_index_offset = pps->chroma_qp_index_offset[0]; + pic_param->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1]; pic_param->pic_fields.value = 0; /* reset all bits */ - pic_param->pic_fields.bits.entropy_coding_mode_flag = h->pps.cabac; - pic_param->pic_fields.bits.weighted_pred_flag = h->pps.weighted_pred; - pic_param->pic_fields.bits.weighted_bipred_idc = h->pps.weighted_bipred_idc; - pic_param->pic_fields.bits.transform_8x8_mode_flag = h->pps.transform_8x8_mode; + pic_param->pic_fields.bits.entropy_coding_mode_flag = pps->cabac; + pic_param->pic_fields.bits.weighted_pred_flag = pps->weighted_pred; + pic_param->pic_fields.bits.weighted_bipred_idc = pps->weighted_bipred_idc; + pic_param->pic_fields.bits.transform_8x8_mode_flag = pps->transform_8x8_mode; pic_param->pic_fields.bits.field_pic_flag = h->picture_structure != PICT_FRAME; - 
pic_param->pic_fields.bits.constrained_intra_pred_flag = h->pps.constrained_intra_pred; - pic_param->pic_fields.bits.pic_order_present_flag = h->pps.pic_order_present; - pic_param->pic_fields.bits.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present; - pic_param->pic_fields.bits.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present; + pic_param->pic_fields.bits.constrained_intra_pred_flag = pps->constrained_intra_pred; + pic_param->pic_fields.bits.pic_order_present_flag = pps->pic_order_present; + pic_param->pic_fields.bits.deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present; + pic_param->pic_fields.bits.redundant_pic_cnt_present_flag = pps->redundant_pic_cnt_present; pic_param->pic_fields.bits.reference_pic_flag = h->nal_ref_idc != 0; pic_param->frame_num = h->frame_num; @@ -282,9 +284,9 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx, iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferH264)); if (!iq_matrix) return -1; - memcpy(iq_matrix->ScalingList4x4, h->pps.scaling_matrix4, sizeof(iq_matrix->ScalingList4x4)); - memcpy(iq_matrix->ScalingList8x8[0], h->pps.scaling_matrix8[0], sizeof(iq_matrix->ScalingList8x8[0])); - memcpy(iq_matrix->ScalingList8x8[1], h->pps.scaling_matrix8[3], sizeof(iq_matrix->ScalingList8x8[0])); + memcpy(iq_matrix->ScalingList4x4, pps->scaling_matrix4, sizeof(iq_matrix->ScalingList4x4)); + memcpy(iq_matrix->ScalingList8x8[0], pps->scaling_matrix8[0], sizeof(iq_matrix->ScalingList8x8[0])); + memcpy(iq_matrix->ScalingList8x8[1], pps->scaling_matrix8[3], sizeof(iq_matrix->ScalingList8x8[0])); return 0; } @@ -335,7 +337,7 @@ static int vaapi_h264_decode_slice(AVCodecContext *avctx, slice_param->num_ref_idx_l0_active_minus1 = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0; slice_param->num_ref_idx_l1_active_minus1 = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0; slice_param->cabac_init_idc = sl->cabac_init_idc; - slice_param->slice_qp_delta = sl->qscale - h->pps.init_qp; + slice_param->slice_qp_delta = sl->qscale - h->ps.pps->init_qp; slice_param->disable_deblocking_filter_idc = sl->deblocking_filter < 2 ? 
!sl->deblocking_filter : sl->deblocking_filter; slice_param->slice_alpha_c0_offset_div2 = sl->slice_alpha_c0_offset / 2; slice_param->slice_beta_offset_div2 = sl->slice_beta_offset / 2; diff --git a/libavcodec/vdpau_h264.c b/libavcodec/vdpau_h264.c index d03d127ee5..877e4e6435 100644 --- a/libavcodec/vdpau_h264.c +++ b/libavcodec/vdpau_h264.c @@ -120,6 +120,8 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) { H264Context * const h = avctx->priv_data; + const PPS *pps = h->ps.pps; + const SPS *sps = h->ps.sps; H264Picture *pic = h->cur_pic_ptr; struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private; VdpPictureInfoH264 *info = &pic_ctx->info.h264; @@ -135,37 +137,37 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx, info->frame_num = h->frame_num; info->field_pic_flag = h->picture_structure != PICT_FRAME; info->bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD; - info->num_ref_frames = h->sps.ref_frame_count; - info->mb_adaptive_frame_field_flag = h->sps.mb_aff && !info->field_pic_flag; - info->constrained_intra_pred_flag = h->pps.constrained_intra_pred; - info->weighted_pred_flag = h->pps.weighted_pred; - info->weighted_bipred_idc = h->pps.weighted_bipred_idc; - info->frame_mbs_only_flag = h->sps.frame_mbs_only_flag; - info->transform_8x8_mode_flag = h->pps.transform_8x8_mode; - info->chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0]; - info->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1]; - info->pic_init_qp_minus26 = h->pps.init_qp - 26; - info->num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1; - info->num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1; - info->log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4; - info->pic_order_cnt_type = h->sps.poc_type; - info->log2_max_pic_order_cnt_lsb_minus4 = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4; - info->delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag; - info->direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag; + info->num_ref_frames = sps->ref_frame_count; + info->mb_adaptive_frame_field_flag = sps->mb_aff && !info->field_pic_flag; + info->constrained_intra_pred_flag = pps->constrained_intra_pred; + info->weighted_pred_flag = pps->weighted_pred; + info->weighted_bipred_idc = pps->weighted_bipred_idc; + info->frame_mbs_only_flag = sps->frame_mbs_only_flag; + info->transform_8x8_mode_flag = pps->transform_8x8_mode; + info->chroma_qp_index_offset = pps->chroma_qp_index_offset[0]; + info->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1]; + info->pic_init_qp_minus26 = pps->init_qp - 26; + info->num_ref_idx_l0_active_minus1 = pps->ref_count[0] - 1; + info->num_ref_idx_l1_active_minus1 = pps->ref_count[1] - 1; + info->log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4; + info->pic_order_cnt_type = sps->poc_type; + info->log2_max_pic_order_cnt_lsb_minus4 = sps->poc_type ? 
0 : sps->log2_max_poc_lsb - 4;
+    info->delta_pic_order_always_zero_flag      = sps->delta_pic_order_always_zero_flag;
+    info->direct_8x8_inference_flag             = sps->direct_8x8_inference_flag;
 #ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
-    info2->qpprime_y_zero_transform_bypass_flag = h->sps.transform_bypass;
-    info2->separate_colour_plane_flag           = h->sps.residual_color_transform_flag;
+    info2->qpprime_y_zero_transform_bypass_flag = sps->transform_bypass;
+    info2->separate_colour_plane_flag           = sps->residual_color_transform_flag;
 #endif
-    info->entropy_coding_mode_flag               = h->pps.cabac;
-    info->pic_order_present_flag                 = h->pps.pic_order_present;
-    info->deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
-    info->redundant_pic_cnt_present_flag         = h->pps.redundant_pic_cnt_present;
+    info->entropy_coding_mode_flag               = pps->cabac;
+    info->pic_order_present_flag                 = pps->pic_order_present;
+    info->deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
+    info->redundant_pic_cnt_present_flag         = pps->redundant_pic_cnt_present;
-    memcpy(info->scaling_lists_4x4, h->pps.scaling_matrix4,
+    memcpy(info->scaling_lists_4x4, pps->scaling_matrix4,
            sizeof(info->scaling_lists_4x4));
-    memcpy(info->scaling_lists_8x8[0], h->pps.scaling_matrix8[0],
+    memcpy(info->scaling_lists_8x8[0], pps->scaling_matrix8[0],
            sizeof(info->scaling_lists_8x8[0]));
-    memcpy(info->scaling_lists_8x8[1], h->pps.scaling_matrix8[3],
+    memcpy(info->scaling_lists_8x8[1], pps->scaling_matrix8[3],
            sizeof(info->scaling_lists_8x8[1]));

     vdpau_h264_set_reference_frames(avctx);
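
Note on the mechanism introduced above: the SPS/PPS are now parsed into reference-counted AVBufferRef buffers kept in H264ParamSets; a re-sent SPS that is byte-identical to the stored one is detected with memcmp() and the fresh copy is dropped, so pointers into the stored buffer stay valid; frame threads take new references with av_buffer_ref() instead of deep-copying the structs; and the dequant tables are built once at PPS-parse time by init_dequant_tables(pps, sps), which is why the per-slice dequant_coeff_pps bookkeeping could be removed. The stand-alone C sketch below only models that ownership pattern with the public libavutil buffer API; the ExampleSPS/ExampleParamSets types and the store_sps()/copy_param_sets() helpers are hypothetical illustrations, not the actual decoder code, which operates on the SPS/PPS structs shown in the diff.

/* Minimal sketch of ref-counted parameter-set storage (illustrative names). */
#include <stdio.h>
#include <string.h>
#include <libavutil/buffer.h>

#define EXAMPLE_MAX_SPS 32

typedef struct ExampleSPS {
    unsigned sps_id;
    int      bit_depth_luma;
} ExampleSPS;

typedef struct ExampleParamSets {
    AVBufferRef *sps_list[EXAMPLE_MAX_SPS]; /* owning references */
} ExampleParamSets;

/* Store a freshly parsed SPS (takes ownership of sps_buf).  If the new SPS is
 * byte-identical to the stored one, keep the original buffer and drop the
 * copy; otherwise replace the entry. */
static int store_sps(ExampleParamSets *ps, AVBufferRef *sps_buf)
{
    const ExampleSPS *sps = (const ExampleSPS *)sps_buf->data;
    unsigned id = sps->sps_id;

    if (id >= EXAMPLE_MAX_SPS) {
        av_buffer_unref(&sps_buf);
        return -1;
    }
    if (ps->sps_list[id] &&
        !memcmp(ps->sps_list[id]->data, sps_buf->data, sps_buf->size)) {
        av_buffer_unref(&sps_buf);          /* repeat of the same SPS */
    } else {
        av_buffer_unref(&ps->sps_list[id]); /* the real code also drops dependent PPSes */
        ps->sps_list[id] = sps_buf;
    }
    return 0;
}

/* Share the stored SPSes with another context (e.g. a frame thread) by taking
 * new references rather than deep-copying. */
static int copy_param_sets(ExampleParamSets *dst, const ExampleParamSets *src)
{
    for (int i = 0; i < EXAMPLE_MAX_SPS; i++) {
        av_buffer_unref(&dst->sps_list[i]);
        if (src->sps_list[i]) {
            dst->sps_list[i] = av_buffer_ref(src->sps_list[i]);
            if (!dst->sps_list[i])
                return -1;                  /* AVERROR(ENOMEM) in real code */
        }
    }
    return 0;
}

int main(void)
{
    ExampleParamSets ps = { 0 }, thread_ps = { 0 };

    AVBufferRef *buf = av_buffer_allocz(sizeof(ExampleSPS));
    if (!buf)
        return 1;
    ((ExampleSPS *)buf->data)->sps_id         = 0;
    ((ExampleSPS *)buf->data)->bit_depth_luma = 8;

    store_sps(&ps, buf);              /* ps now owns the buffer */
    copy_param_sets(&thread_ps, &ps); /* second context shares it by reference */

    printf("same underlying data: %s\n",
           ps.sps_list[0]->data == thread_ps.sps_list[0]->data ? "yes" : "no");

    for (int i = 0; i < EXAMPLE_MAX_SPS; i++) {
        av_buffer_unref(&ps.sps_list[i]);
        av_buffer_unref(&thread_ps.sps_list[i]);
    }
    return 0;
}

Assuming the libavutil development headers are installed, the sketch should build with something like gcc sketch.c $(pkg-config --cflags --libs libavutil). Keeping byte-identical parameter sets in their original buffer is what lets ff_h264_decode_slice_header compare h->ps.pps and h->ps.sps against the list entries by pointer: as long as a stored buffer is not replaced, its data pointer is stable.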