author     Michael Niedermayer <michaelni@gmx.at>    2013-11-02 10:30:30 +0100
committer  Michael Niedermayer <michaelni@gmx.at>    2013-11-02 10:30:30 +0100
commit     1344c0453615a1f0141b11f5f4e3c640ee890afc (patch)
tree       b89adf4ab7358fb23496c9469090dc19cc695dbd
parent     0b82fdcc469ec8f5dd4affe1fe5ae8710b373cd2 (diff)
parent     dc6ea00cd2b91b591e6726e5bf1d5e03a4a9bdd0 (diff)
download   ffmpeg-1344c0453615a1f0141b11f5f4e3c640ee890afc.tar.gz
Merge commit 'dc6ea00cd2b91b591e6726e5bf1d5e03a4a9bdd0'

* commit 'dc6ea00cd2b91b591e6726e5bf1d5e03a4a9bdd0':
  mpeg4video: K&R formatting cosmetics

Conflicts:
	libavcodec/mpeg4video.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
-rw-r--r--  libavcodec/mpeg4video.c | 182
-rw-r--r--  libavcodec/mpeg4video.h | 103
2 files changed, 156 insertions, 129 deletions
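For readers skimming the diff, the convention being applied is plain K&R C style: the opening brace of a function body on its own line, a space after control keywords such as if/for/switch, spaces around binary operators, and case labels at the same indent as the switch. Below is a small, self-contained before/after sketch of that convention; the function names (scale_mv_old, scale_mv_kr) are hypothetical and are not code from this patch.

#include <stdio.h>

/* Old compact style, similar to what the patch removes (hypothetical example): */
static int scale_mv_old(int v, int num, int den){
    if(den==0)
        return 0;
    return v*num/den;
}

/* Same logic in the K&R style the patch applies: brace on its own line,
 * space after 'if', spaces around '==', '*' and '/'. */
static int scale_mv_kr(int v, int num, int den)
{
    if (den == 0)
        return 0;
    return v * num / den;
}

int main(void)
{
    /* Both variants behave identically; only the formatting differs. */
    printf("%d %d\n", scale_mv_old(6, 2, 3), scale_mv_kr(6, 2, 3));
    return 0;
}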
diff --git a/libavcodec/mpeg4video.c b/libavcodec/mpeg4video.c
index 9b86997a56..3f92ba5a99 100644
--- a/libavcodec/mpeg4video.c
+++ b/libavcodec/mpeg4video.c
@@ -24,19 +24,20 @@
 #include "mpeg4video.h"
 #include "mpeg4data.h"
 
-uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3];
-
-int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s){
-    switch(s->pict_type){
-        case AV_PICTURE_TYPE_I:
-            return 16;
-        case AV_PICTURE_TYPE_P:
-        case AV_PICTURE_TYPE_S:
-            return s->f_code+15;
-        case AV_PICTURE_TYPE_B:
-            return FFMAX3(s->f_code, s->b_code, 2) + 15;
-        default:
-            return -1;
+uint8_t ff_mpeg4_static_rl_table_store[3][2][2 * MAX_RUN + MAX_LEVEL + 3];
+
+int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s)
+{
+    switch (s->pict_type) {
+    case AV_PICTURE_TYPE_I:
+        return 16;
+    case AV_PICTURE_TYPE_P:
+    case AV_PICTURE_TYPE_S:
+        return s->f_code + 15;
+    case AV_PICTURE_TYPE_B:
+        return FFMAX3(s->f_code, s->b_code, 2) + 15;
+    default:
+        return -1;
     }
 }
 
@@ -44,70 +45,75 @@ void ff_mpeg4_clean_buffers(MpegEncContext *s)
 {
     int c_wrap, c_xy, l_wrap, l_xy;
 
-    l_wrap= s->b8_stride;
-    l_xy= (2*s->mb_y-1)*l_wrap + s->mb_x*2 - 1;
-    c_wrap= s->mb_stride;
-    c_xy= (s->mb_y-1)*c_wrap + s->mb_x - 1;
+    l_wrap = s->b8_stride;
+    l_xy   = (2 * s->mb_y - 1) * l_wrap + s->mb_x * 2 - 1;
+    c_wrap = s->mb_stride;
+    c_xy   = (s->mb_y - 1) * c_wrap + s->mb_x - 1;
 
 #if 0
     /* clean DC */
-    memsetw(s->dc_val[0] + l_xy, 1024, l_wrap*2+1);
-    memsetw(s->dc_val[1] + c_xy, 1024, c_wrap+1);
-    memsetw(s->dc_val[2] + c_xy, 1024, c_wrap+1);
+    memsetw(s->dc_val[0] + l_xy, 1024, l_wrap * 2 + 1);
+    memsetw(s->dc_val[1] + c_xy, 1024, c_wrap + 1);
+    memsetw(s->dc_val[2] + c_xy, 1024, c_wrap + 1);
 #endif
 
     /* clean AC */
-    memset(s->ac_val[0] + l_xy, 0, (l_wrap*2+1)*16*sizeof(int16_t));
-    memset(s->ac_val[1] + c_xy, 0, (c_wrap +1)*16*sizeof(int16_t));
-    memset(s->ac_val[2] + c_xy, 0, (c_wrap +1)*16*sizeof(int16_t));
+    memset(s->ac_val[0] + l_xy, 0, (l_wrap * 2 + 1) * 16 * sizeof(int16_t));
+    memset(s->ac_val[1] + c_xy, 0, (c_wrap + 1) * 16 * sizeof(int16_t));
+    memset(s->ac_val[2] + c_xy, 0, (c_wrap + 1) * 16 * sizeof(int16_t));
 
     /* clean MV */
     // we can't clear the MVs as they might be needed by a b frame
-//    memset(s->motion_val + l_xy, 0, (l_wrap*2+1)*2*sizeof(int16_t));
-//    memset(s->motion_val, 0, 2*sizeof(int16_t)*(2 + s->mb_width*2)*(2 + s->mb_height*2));
-    s->last_mv[0][0][0]=
-    s->last_mv[0][0][1]=
-    s->last_mv[1][0][0]=
-    s->last_mv[1][0][1]= 0;
+//  memset(s->motion_val + l_xy, 0, (l_wrap * 2 + 1) * 2 * sizeof(int16_t));
+//  memset(s->motion_val, 0, 2 * sizeof(int16_t) * (2 + s->mb_width * 2) *
+//                           (2 + s->mb_height * 2));
+    s->last_mv[0][0][0] =
+    s->last_mv[0][0][1] =
+    s->last_mv[1][0][0] =
+    s->last_mv[1][0][1] = 0;
 }
 
 #define tab_size ((signed)FF_ARRAY_ELEMS(s->direct_scale_mv[0]))
-#define tab_bias (tab_size/2)
+#define tab_bias (tab_size / 2)
 
-//used by mpeg4 and rv10 decoder
-void ff_mpeg4_init_direct_mv(MpegEncContext *s){
+// used by mpeg4 and rv10 decoder
+void ff_mpeg4_init_direct_mv(MpegEncContext *s)
+{
     int i;
-    for(i=0; i<tab_size; i++){
-        s->direct_scale_mv[0][i] = (i-tab_bias)*s->pb_time/s->pp_time;
-        s->direct_scale_mv[1][i] = (i-tab_bias)*(s->pb_time-s->pp_time)/s->pp_time;
+    for (i = 0; i < tab_size; i++) {
+        s->direct_scale_mv[0][i] = (i - tab_bias) * s->pb_time / s->pp_time;
+        s->direct_scale_mv[1][i] = (i - tab_bias) * (s->pb_time - s->pp_time) /
+                                   s->pp_time;
    }
 }
 
-static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my, int i){
-    int xy= s->block_index[i];
-    uint16_t time_pp= s->pp_time;
-    uint16_t time_pb= s->pb_time;
+static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx,
+                                              int my, int i)
+{
+    int xy           = s->block_index[i];
+    uint16_t time_pp = s->pp_time;
+    uint16_t time_pb = s->pb_time;
    int p_mx, p_my;
 
     p_mx = s->next_picture.motion_val[0][xy][0];
-    if((unsigned)(p_mx + tab_bias) < tab_size){
+    if ((unsigned)(p_mx + tab_bias) < tab_size) {
         s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx;
         s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
                             : s->direct_scale_mv[1][p_mx + tab_bias];
-    }else{
-        s->mv[0][i][0] = p_mx*time_pb/time_pp + mx;
+    } else {
+        s->mv[0][i][0] = p_mx * time_pb / time_pp + mx;
         s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
-                            : p_mx*(time_pb - time_pp)/time_pp;
+                            : p_mx * (time_pb - time_pp) / time_pp;
     }
     p_my = s->next_picture.motion_val[0][xy][1];
-    if((unsigned)(p_my + tab_bias) < tab_size){
+    if ((unsigned)(p_my + tab_bias) < tab_size) {
         s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my;
         s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
                             : s->direct_scale_mv[1][p_my + tab_bias];
-    }else{
-        s->mv[0][i][1] = p_my*time_pb/time_pp + my;
+    } else {
+        s->mv[0][i][1] = p_my * time_pb / time_pp + my;
         s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
-                            : p_my*(time_pb - time_pp)/time_pp;
+                            : p_my * (time_pb - time_pp) / time_pp;
     }
 }
 
@@ -115,56 +121,72 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my,
 #undef tab_bias
 
 /**
- *
  * @return the mb_type
  */
-int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
-    const int mb_index= s->mb_x + s->mb_y*s->mb_stride;
+int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
+{
+    const int mb_index = s->mb_x + s->mb_y * s->mb_stride;
     const int colocated_mb_type = s->next_picture.mb_type[mb_index];
     uint16_t time_pp;
     uint16_t time_pb;
     int i;
 
-    //FIXME avoid divides
+    // FIXME avoid divides
     // try special case with shifts for 1 and 3 B-frames?
 
-    if(IS_8X8(colocated_mb_type)){
+    if (IS_8X8(colocated_mb_type)) {
         s->mv_type = MV_TYPE_8X8;
-        for(i=0; i<4; i++){
+        for (i = 0; i < 4; i++)
             ff_mpeg4_set_one_direct_mv(s, mx, my, i);
-        }
         return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_L0L1;
-    } else if(IS_INTERLACED(colocated_mb_type)){
+    } else if (IS_INTERLACED(colocated_mb_type)) {
         s->mv_type = MV_TYPE_FIELD;
-        for(i=0; i<2; i++){
+        for (i = 0; i < 2; i++) {
             int field_select = s->next_picture.ref_index[0][4 * mb_index + 2 * i];
-            s->field_select[0][i]= field_select;
-            s->field_select[1][i]= i;
-            if(s->top_field_first){
-                time_pp= s->pp_field_time - field_select + i;
-                time_pb= s->pb_field_time - field_select + i;
-            }else{
-                time_pp= s->pp_field_time + field_select - i;
-                time_pb= s->pb_field_time + field_select - i;
+            s->field_select[0][i] = field_select;
+            s->field_select[1][i] = i;
+            if (s->top_field_first) {
+                time_pp = s->pp_field_time - field_select + i;
+                time_pb = s->pb_field_time - field_select + i;
+            } else {
+                time_pp = s->pp_field_time + field_select - i;
+                time_pb = s->pb_field_time + field_select - i;
             }
-            s->mv[0][i][0] = s->p_field_mv_table[i][0][mb_index][0]*time_pb/time_pp + mx;
-            s->mv[0][i][1] = s->p_field_mv_table[i][0][mb_index][1]*time_pb/time_pp + my;
-            s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->p_field_mv_table[i][0][mb_index][0]
-                                : s->p_field_mv_table[i][0][mb_index][0]*(time_pb - time_pp)/time_pp;
-            s->mv[1][i][1] = my ? s->mv[0][i][1] - s->p_field_mv_table[i][0][mb_index][1]
-                                : s->p_field_mv_table[i][0][mb_index][1]*(time_pb - time_pp)/time_pp;
+            s->mv[0][i][0] = s->p_field_mv_table[i][0][mb_index][0] *
+                             time_pb / time_pp + mx;
+            s->mv[0][i][1] = s->p_field_mv_table[i][0][mb_index][1] *
+                             time_pb / time_pp + my;
+            s->mv[1][i][0] = mx ? s->mv[0][i][0] -
+                                  s->p_field_mv_table[i][0][mb_index][0]
+                                : s->p_field_mv_table[i][0][mb_index][0] *
+                                  (time_pb - time_pp) / time_pp;
+            s->mv[1][i][1] = my ? s->mv[0][i][1] -
+                                  s->p_field_mv_table[i][0][mb_index][1]
+                                : s->p_field_mv_table[i][0][mb_index][1] *
+                                  (time_pb - time_pp) / time_pp;
         }
-        return MB_TYPE_DIRECT2 | MB_TYPE_16x8 | MB_TYPE_L0L1 | MB_TYPE_INTERLACED;
-    }else{
+        return MB_TYPE_DIRECT2 | MB_TYPE_16x8 |
+               MB_TYPE_L0L1 | MB_TYPE_INTERLACED;
+    } else {
         ff_mpeg4_set_one_direct_mv(s, mx, my, 0);
-        s->mv[0][1][0] = s->mv[0][2][0] = s->mv[0][3][0] = s->mv[0][0][0];
-        s->mv[0][1][1] = s->mv[0][2][1] = s->mv[0][3][1] = s->mv[0][0][1];
-        s->mv[1][1][0] = s->mv[1][2][0] = s->mv[1][3][0] = s->mv[1][0][0];
-        s->mv[1][1][1] = s->mv[1][2][1] = s->mv[1][3][1] = s->mv[1][0][1];
-        if((s->avctx->workaround_bugs & FF_BUG_DIRECT_BLOCKSIZE) || !s->quarter_sample)
-            s->mv_type= MV_TYPE_16X16;
+        s->mv[0][1][0] =
+        s->mv[0][2][0] =
+        s->mv[0][3][0] = s->mv[0][0][0];
+        s->mv[0][1][1] =
+        s->mv[0][2][1] =
+        s->mv[0][3][1] = s->mv[0][0][1];
+        s->mv[1][1][0] =
+        s->mv[1][2][0] =
+        s->mv[1][3][0] = s->mv[1][0][0];
+        s->mv[1][1][1] =
+        s->mv[1][2][1] =
+        s->mv[1][3][1] = s->mv[1][0][1];
+        if ((s->avctx->workaround_bugs & FF_BUG_DIRECT_BLOCKSIZE) ||
+            !s->quarter_sample)
+            s->mv_type = MV_TYPE_16X16;
         else
-            s->mv_type= MV_TYPE_8X8;
-        return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_L0L1; //Note see prev line
+            s->mv_type = MV_TYPE_8X8;
+        // Note see prev line
+        return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_L0L1;
     }
 }
diff --git a/libavcodec/mpeg4video.h b/libavcodec/mpeg4video.h
index c4d23c9c2b..24e25880f5 100644
--- a/libavcodec/mpeg4video.h
+++ b/libavcodec/mpeg4video.h
@@ -24,6 +24,7 @@
 #define AVCODEC_MPEG4VIDEO_H
 
 #include <stdint.h>
+
 #include "get_bits.h"
 #include "mpegvideo.h"
 #include "rl.h"
@@ -34,13 +35,13 @@
 #define BIN_ONLY_SHAPE 2
 #define GRAY_SHAPE 3
 
-#define SIMPLE_VO_TYPE 1
-#define CORE_VO_TYPE 3
-#define MAIN_VO_TYPE 4
-#define NBIT_VO_TYPE 5
-#define ARTS_VO_TYPE 10
-#define ACE_VO_TYPE 12
-#define ADV_SIMPLE_VO_TYPE 17
+#define SIMPLE_VO_TYPE      1
+#define CORE_VO_TYPE        3
+#define MAIN_VO_TYPE        4
+#define NBIT_VO_TYPE        5
+#define ARTS_VO_TYPE       10
+#define ACE_VO_TYPE        12
+#define ADV_SIMPLE_VO_TYPE 17
 
 // aspect_ratio_info
 #define EXTENDED_PAR 15
@@ -88,15 +89,15 @@ extern const uint8_t ff_mpeg4_dc_threshold[8];
 void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
                         int motion_x, int motion_y);
-void ff_mpeg4_pred_ac(MpegEncContext * s, int16_t *block, int n,
+void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n,
                       int dir);
-void ff_set_mpeg4_time(MpegEncContext * s);
+void ff_set_mpeg4_time(MpegEncContext *s);
 void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
-int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb);
+int ff_mpeg4_decode_picture_header(MpegEncContext *s, GetBitContext *gb);
 void ff_mpeg4_encode_video_packet_header(MpegEncContext *s);
 void ff_mpeg4_clean_buffers(MpegEncContext *s);
-void ff_mpeg4_stuffing(PutBitContext * pbc);
+void ff_mpeg4_stuffing(PutBitContext *pbc);
 void ff_mpeg4_init_partitions(MpegEncContext *s);
 void ff_mpeg4_merge_partitions(MpegEncContext *s);
 void ff_clean_mpeg4_qscales(MpegEncContext *s);
@@ -112,8 +113,7 @@ void ff_mpeg4videodec_static_init(void);
  */
 int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my);
 
-extern uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3];
-
+extern uint8_t ff_mpeg4_static_rl_table_store[3][2][2 * MAX_RUN + MAX_LEVEL + 3];
 #if 0 //3IV1 is quite rare and it slows things down a tiny bit
 #define IS_3IV1 s->codec_tag == AV_RL32("3IV1")
@@ -121,7 +121,6 @@ extern uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3];
 #define IS_3IV1 0
 #endif
 
-
 /**
  * Predict the dc.
  * encoding quantized level -> quantized diff
@@ -129,75 +128,81 @@ extern uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3];
  * @param n block index (0-3 are luma, 4-5 are chroma)
  * @param dir_ptr pointer to an integer where the prediction direction will be stored
  */
-static inline int ff_mpeg4_pred_dc(MpegEncContext * s, int n, int level, int *dir_ptr, int encoding)
+static inline int ff_mpeg4_pred_dc(MpegEncContext *s, int n, int level,
+                                   int *dir_ptr, int encoding)
 {
     int a, b, c, wrap, pred, scale, ret;
     int16_t *dc_val;
 
     /* find prediction */
-    if (n < 4) {
+    if (n < 4)
         scale = s->y_dc_scale;
-    } else {
+    else
         scale = s->c_dc_scale;
-    }
-    if(IS_3IV1)
-        scale= 8;
+    if (IS_3IV1)
+        scale = 8;
 
-    wrap= s->block_wrap[n];
+    wrap   = s->block_wrap[n];
     dc_val = s->dc_val[0] + s->block_index[n];
 
     /* B C
     * A X
     */
-    a = dc_val[ - 1];
-    b = dc_val[ - 1 - wrap];
-    c = dc_val[ - wrap];
-
-    /* outside slice handling (we can't do that by memset as we need the dc for error resilience) */
-    if(s->first_slice_line && n!=3){
-        if(n!=2) b=c= 1024;
-        if(n!=1 && s->mb_x == s->resync_mb_x) b=a= 1024;
+    a = dc_val[-1];
+    b = dc_val[-1 - wrap];
+    c = dc_val[-wrap];
+
+    /* outside slice handling (we can't do that by memset as we need the
+     * dc for error resilience) */
+    if (s->first_slice_line && n != 3) {
+        if (n != 2)
+            b = c = 1024;
+        if (n != 1 && s->mb_x == s->resync_mb_x)
+            b = a = 1024;
     }
 
-    if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1){
-        if(n==0 || n==4 || n==5)
-            b=1024;
+    if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1) {
+        if (n == 0 || n == 4 || n == 5)
+            b = 1024;
     }
 
     if (abs(a - b) < abs(b - c)) {
-        pred = c;
+        pred     = c;
         *dir_ptr = 1; /* top */
     } else {
-        pred = a;
+        pred     = a;
         *dir_ptr = 0; /* left */
     }
 
     /* we assume pred is positive */
     pred = FASTDIV((pred + (scale >> 1)), scale);
 
-    if(encoding){
+    if (encoding) {
         ret = level - pred;
-    }else{
+    } else {
         level += pred;
-        ret= level;
-        if(s->err_recognition&(AV_EF_BITSTREAM|AV_EF_AGGRESSIVE)){
-            if(level<0){
-                av_log(s->avctx, AV_LOG_ERROR, "dc<0 at %dx%d\n", s->mb_x, s->mb_y);
+        ret    = level;
+        if (s->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) {
+            if (level < 0) {
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "dc<0 at %dx%d\n", s->mb_x, s->mb_y);
                 return -1;
             }
-            if(level*scale > 2048 + scale){
-                av_log(s->avctx, AV_LOG_ERROR, "dc overflow at %dx%d\n", s->mb_x, s->mb_y);
+            if (level * scale > 2048 + scale) {
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "dc overflow at %dx%d\n", s->mb_x, s->mb_y);
                 return -1;
             }
         }
     }
-    level *=scale;
-    if(level&(~2047)){
-        if(level<0)
-            level=0;
-        else if(!(s->workaround_bugs&FF_BUG_DC_CLIP))
-            level=2047;
+    level *= scale;
+    if (level & (~2047)) {
+        if (level < 0)
+            level = 0;
+        else if (!(s->workaround_bugs & FF_BUG_DC_CLIP))
+            level = 2047;
    }
-    dc_val[0]= level;
+    dc_val[0] = level;
 
     return ret;
 }
+
 #endif /* AVCODEC_MPEG4VIDEO_H */