author     Michael Niedermayer <michaelni@gmx.at>    2002-12-04 10:04:03 +0000
committer  Michael Niedermayer <michaelni@gmx.at>    2002-12-04 10:04:03 +0000
commit     1e491e29c27cf6a6925666e4f4eac41b65e263d7 (patch)
tree       99879470b8deeb55e7d88c62729b62ac27d249ee /libavcodec/h263.c
parent     855ea723b0ea450137e54674179751c14e8fc6b5 (diff)
download   ffmpeg-1e491e29c27cf6a6925666e4f4eac41b65e263d7.tar.gz
cleanup
adding AVVideoFrame
moving quality, pict_type, key_frame, qscale_table, ... to AVVideoFrame (sketched below)
removing obsolete variables in AVCodecContext
skipping of MBs in B frames (sketched after the diff below)
correctly initializing AVCodecContext
picture buffer cleanup
Originally committed as revision 1302 to svn://svn.ffmpeg.org/ffmpeg/trunk
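The "moving ... to AVVideoFrame" item boils down to one recurring edit in the hunks below: per-frame state such as qscale_table now lives on the per-picture structure instead of the codec/encoder context, and hot loops cache a local pointer to it. A minimal C sketch of that pattern, modeled on the ff_clean_h263_qscales() hunk; the cut-down Picture/MpegEncContext definitions and the helper name are illustrative stand-ins, not the real structs (which, together with the new AVVideoFrame, carry many more fields):

#include <stdint.h>

/* Cut-down stand-ins for illustration only; the real structs have many more fields. */
typedef struct Picture {
    int8_t *qscale_table;    /* per-macroblock quantizer, now stored per picture */
    int     pict_type;
    int     key_frame;
} Picture;

typedef struct MpegEncContext {
    Picture current_picture; /* the picture currently being encoded/decoded */
    int     mb_num;          /* number of macroblocks in the picture */
} MpegEncContext;

/* The pattern used throughout the patch: cache a local pointer to the
 * per-picture table once instead of indexing a context-level s->qscale_table. */
static void clean_qscales_sketch(MpegEncContext *s)
{
    int8_t *const qscale_table = s->current_picture.qscale_table;
    int i;

    for (i = 1; i < s->mb_num; i++) {
        /* limit the qscale step between neighbouring macroblocks to +2 */
        if (qscale_table[i] - qscale_table[i - 1] > 2)
            qscale_table[i] = qscale_table[i - 1] + 2;
    }
}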
Diffstat (limited to 'libavcodec/h263.c')
-rw-r--r--   libavcodec/h263.c   87
1 file changed, 48 insertions, 39 deletions
diff --git a/libavcodec/h263.c b/libavcodec/h263.c
index 0083ca280a..e6578a0c58 100644
--- a/libavcodec/h263.c
+++ b/libavcodec/h263.c
@@ -272,6 +272,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
 {
     int score0=0, score1=0;
     int i, n;
+    int8_t * const qscale_table= s->current_picture.qscale_table;
 
     for(n=0; n<6; n++){
         INT16 *ac_val, *ac_val1;
@@ -282,7 +283,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
             const int xy= s->mb_x + s->mb_y*s->mb_width - s->mb_width;
             /* top prediction */
             ac_val-= s->block_wrap[n]*16;
-            if(s->mb_y==0 || s->qscale == s->qscale_table[xy] || n==2 || n==3){
+            if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
                 /* same qscale */
                 for(i=1; i<8; i++){
                     const int level= block[n][s->idct_permutation[i ]];
@@ -296,7 +297,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
                 for(i=1; i<8; i++){
                     const int level= block[n][s->idct_permutation[i ]];
                     score0+= ABS(level);
-                    score1+= ABS(level - ROUNDED_DIV(ac_val[i + 8]*s->qscale_table[xy], s->qscale));
+                    score1+= ABS(level - ROUNDED_DIV(ac_val[i + 8]*qscale_table[xy], s->qscale));
                     ac_val1[i ]= block[n][s->idct_permutation[i<<3]];
                     ac_val1[i+8]= level;
                 }
@@ -305,7 +306,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
             const int xy= s->mb_x-1 + s->mb_y*s->mb_width;
             /* left prediction */
             ac_val-= 16;
-            if(s->mb_x==0 || s->qscale == s->qscale_table[xy] || n==1 || n==3){
+            if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
                 /* same qscale */
                 for(i=1; i<8; i++){
                     const int level= block[n][s->idct_permutation[i<<3]];
@@ -319,7 +320,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
                 for(i=1; i<8; i++){
                     const int level= block[n][s->idct_permutation[i<<3]];
                     score0+= ABS(level);
-                    score1+= ABS(level - ROUNDED_DIV(ac_val[i]*s->qscale_table[xy], s->qscale));
+                    score1+= ABS(level - ROUNDED_DIV(ac_val[i]*qscale_table[xy], s->qscale));
                     ac_val1[i ]= level;
                     ac_val1[i+8]= block[n][s->idct_permutation[i ]];
                 }
@@ -335,14 +336,15 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
  */
 void ff_clean_h263_qscales(MpegEncContext *s){
     int i;
+    int8_t * const qscale_table= s->current_picture.qscale_table;
 
     for(i=1; i<s->mb_num; i++){
-        if(s->qscale_table[i] - s->qscale_table[i-1] >2)
-            s->qscale_table[i]= s->qscale_table[i-1]+2;
+        if(qscale_table[i] - qscale_table[i-1] >2)
+            qscale_table[i]= qscale_table[i-1]+2;
     }
     for(i=s->mb_num-2; i>=0; i--){
-        if(s->qscale_table[i] - s->qscale_table[i+1] >2)
-            s->qscale_table[i]= s->qscale_table[i+1]+2;
+        if(qscale_table[i] - qscale_table[i+1] >2)
+            qscale_table[i]= qscale_table[i+1]+2;
     }
 }
 
@@ -351,11 +353,12 @@ void ff_clean_h263_qscales(MpegEncContext *s){
  */
 void ff_clean_mpeg4_qscales(MpegEncContext *s){
     int i;
-    
+    int8_t * const qscale_table= s->current_picture.qscale_table;
+
     ff_clean_h263_qscales(s);
 
     for(i=1; i<s->mb_num; i++){
-        if(s->qscale_table[i] != s->qscale_table[i-1] && (s->mb_type[i]&MB_TYPE_INTER4V)){
+        if(qscale_table[i] != qscale_table[i-1] && (s->mb_type[i]&MB_TYPE_INTER4V)){
             s->mb_type[i]&= ~MB_TYPE_INTER4V;
             s->mb_type[i]|= MB_TYPE_INTER;
         }
@@ -367,21 +370,21 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){
            for the actual adaptive quantization */
 
         for(i=0; i<s->mb_num; i++){
-            odd += s->qscale_table[i]&1;
+            odd += qscale_table[i]&1;
         }
 
         if(2*odd > s->mb_num) odd=1;
         else                  odd=0;
 
         for(i=0; i<s->mb_num; i++){
-            if((s->qscale_table[i]&1) != odd)
-                s->qscale_table[i]++;
-            if(s->qscale_table[i] > 31)
-                s->qscale_table[i]= 31;
+            if((qscale_table[i]&1) != odd)
+                qscale_table[i]++;
+            if(qscale_table[i] > 31)
+                qscale_table[i]= 31;
         }
 
         for(i=1; i<s->mb_num; i++){
-            if(s->qscale_table[i] != s->qscale_table[i-1] && (s->mb_type[i]&MB_TYPE_DIRECT)){
+            if(qscale_table[i] != qscale_table[i-1] && (s->mb_type[i]&MB_TYPE_DIRECT)){
                 s->mb_type[i]&= ~MB_TYPE_DIRECT;
                 s->mb_type[i]|= MB_TYPE_BIDIR;
             }
@@ -427,7 +430,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
         assert(mb_type>=0);
 
         /* nothing to do if this MB was skiped in the next P Frame */
-        if(s->mbskip_table[s->mb_y * s->mb_width + s->mb_x]){ //FIXME avoid DCT & ...
+        if(s->next_picture.mbskip_table[s->mb_y * s->mb_width + s->mb_x]){ //FIXME avoid DCT & ...
             s->skip_count++;
             s->mv[0][0][0]=
             s->mv[0][0][1]=
@@ -435,6 +438,8 @@ void mpeg4_encode_mb(MpegEncContext * s,
             s->mv[1][0][1]= 0;
             s->mv_dir= MV_DIR_FORWARD; //doesnt matter
             s->qscale -= s->dquant;
+//            s->mb_skiped=1;
+
 
             return;
         }
@@ -451,6 +456,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
                 s->skip_count++;
                 return;
             }
+
             put_bits(&s->pb, 1, 0);     /* mb coded modb1=0 */
             put_bits(&s->pb, 1, cbp ? 0 : 1); /* modb2 */ //FIXME merge
             put_bits(&s->pb, mb_type+1, 1); // this table is so simple that we dont need it :)
@@ -547,16 +553,17 @@ void mpeg4_encode_mb(MpegEncContext * s,
 
             if(y+16 > s->height) y= s->height-16;
             offset= x + y*s->linesize;
-            p_pic= s->new_picture[0] + offset;
+            p_pic= s->new_picture.data[0] + offset;
 
             s->mb_skiped=1;
             for(i=0; i<s->max_b_frames; i++){
                 uint8_t *b_pic;
                 int diff;
+                Picture *pic= s->reordered_input_picture[i+1];
 
-                if(s->coded_order[i+1].pict_type!=B_TYPE) break;
+                if(pic==NULL || pic->pict_type!=B_TYPE) break;
 
-                b_pic= s->coded_order[i+1].picture[0] + offset;
+                b_pic= pic->data[0] + offset + 16; //FIXME +16
                 diff= s->dsp.pix_abs16x16(p_pic, b_pic, s->linesize);
                 if(diff>s->qscale*70){ //FIXME check that 70 is optimal
                     s->mb_skiped=0;
@@ -1493,8 +1500,8 @@ void ff_set_mpeg4_time(MpegEncContext * s, int picture_number){
         s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;
     }
 
-    if(s->avctx->pts)
-        s->time= (s->avctx->pts*s->time_increment_resolution + 500*1000)/(1000*1000);
+    if(s->current_picture.pts)
+        s->time= (s->current_picture.pts*s->time_increment_resolution + 500*1000)/(1000*1000);
     else
         s->time= picture_number*(INT64)FRAME_RATE_BASE*s->time_increment_resolution/s->frame_rate;
     time_div= s->time/s->time_increment_resolution;
@@ -1736,6 +1743,7 @@ void mpeg4_pred_ac(MpegEncContext * s, INT16 *block, int n,
 {
     int i;
     INT16 *ac_val, *ac_val1;
+    int8_t * const qscale_table= s->current_picture.qscale_table;
 
     /* find prediction */
     ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
@@ -1746,7 +1754,7 @@ void mpeg4_pred_ac(MpegEncContext * s, INT16 *block, int n,
         /* left prediction */
         ac_val -= 16;
 
-        if(s->mb_x==0 || s->qscale == s->qscale_table[xy] || n==1 || n==3){
+        if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
             /* same qscale */
             for(i=1;i<8;i++) {
                 block[s->idct_permutation[i<<3]] += ac_val[i];
@@ -1754,7 +1762,7 @@ void mpeg4_pred_ac(MpegEncContext * s, INT16 *block, int n,
         }else{
             /* different qscale, we must rescale */
             for(i=1;i<8;i++) {
-                block[s->idct_permutation[i<<3]] += ROUNDED_DIV(ac_val[i]*s->qscale_table[xy], s->qscale);
+                block[s->idct_permutation[i<<3]] += ROUNDED_DIV(ac_val[i]*qscale_table[xy], s->qscale);
             }
         }
     } else {
@@ -1762,7 +1770,7 @@ void mpeg4_pred_ac(MpegEncContext * s, INT16 *block, int n,
         /* top prediction */
         ac_val -= 16 * s->block_wrap[n];
 
-        if(s->mb_y==0 || s->qscale == s->qscale_table[xy] || n==2 || n==3){
+        if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
             /* same qscale */
             for(i=1;i<8;i++) {
                 block[s->idct_permutation[i]] += ac_val[i + 8];
@@ -1770,7 +1778,7 @@ void mpeg4_pred_ac(MpegEncContext * s, INT16 *block, int n,
         }else{
             /* different qscale, we must rescale */
             for(i=1;i<8;i++) {
-                block[s->idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8]*s->qscale_table[xy], s->qscale);
+                block[s->idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8]*qscale_table[xy], s->qscale);
             }
         }
     }
@@ -1790,6 +1798,7 @@ static void mpeg4_inv_pred_ac(MpegEncContext * s, INT16 *block, int n,
 {
     int i;
     INT16 *ac_val;
+    int8_t * const qscale_table= s->current_picture.qscale_table;
 
     /* find prediction */
     ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
@@ -1798,7 +1807,7 @@ static void mpeg4_inv_pred_ac(MpegEncContext * s, INT16 *block, int n,
         const int xy= s->mb_x-1 + s->mb_y*s->mb_width;
         /* left prediction */
         ac_val -= 16;
-        if(s->mb_x==0 || s->qscale == s->qscale_table[xy] || n==1 || n==3){
+        if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
             /* same qscale */
             for(i=1;i<8;i++) {
                 block[s->idct_permutation[i<<3]] -= ac_val[i];
@@ -1806,14 +1815,14 @@ static void mpeg4_inv_pred_ac(MpegEncContext * s, INT16 *block, int n,
         }else{
             /* different qscale, we must rescale */
             for(i=1;i<8;i++) {
-                block[s->idct_permutation[i<<3]] -= ROUNDED_DIV(ac_val[i]*s->qscale_table[xy], s->qscale);
+                block[s->idct_permutation[i<<3]] -= ROUNDED_DIV(ac_val[i]*qscale_table[xy], s->qscale);
             }
         }
     } else {
         const int xy= s->mb_x + s->mb_y*s->mb_width - s->mb_width;
         /* top prediction */
         ac_val -= 16 * s->block_wrap[n];
-        if(s->mb_y==0 || s->qscale == s->qscale_table[xy] || n==2 || n==3){
+        if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
             /* same qscale */
             for(i=1;i<8;i++) {
                 block[s->idct_permutation[i]] -= ac_val[i + 8];
@@ -1821,7 +1830,7 @@ static void mpeg4_inv_pred_ac(MpegEncContext * s, INT16 *block, int n,
         }else{
             /* different qscale, we must rescale */
             for(i=1;i<8;i++) {
-                block[s->idct_permutation[i]] -= ROUNDED_DIV(ac_val[i + 8]*s->qscale_table[xy], s->qscale);
+                block[s->idct_permutation[i]] -= ROUNDED_DIV(ac_val[i + 8]*qscale_table[xy], s->qscale);
             }
         }
     }
@@ -2532,7 +2541,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
                 if(cbpc & 4) {
                     change_qscale(s, quant_tab[get_bits(&s->gb, 2)]);
                 }
-                s->qscale_table[xy]= s->qscale;
+                s->current_picture.qscale_table[xy]= s->qscale;
                 s->mbintra_table[xy]= 1;
 
                 for(i=0; i<6; i++){
@@ -2704,7 +2713,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
             if(s->cbp_table[xy] & 8) {
                 change_qscale(s, quant_tab[get_bits(&s->gb, 2)]);
             }
-            s->qscale_table[xy]= s->qscale;
+            s->current_picture.qscale_table[xy]= s->qscale;
 
             for(i=0; i<6; i++){
                 int dc_pred_dir;
@@ -2721,7 +2730,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
             s->pred_dir_table[xy]= dir | (ac_pred<<7);
             s->error_status_table[xy]&= ~DC_ERROR;
         }else if(s->mb_type[xy]&MB_TYPE_SKIPED){
-            s->qscale_table[xy]= s->qscale;
+            s->current_picture.qscale_table[xy]= s->qscale;
             s->cbp_table[xy]= 0;
         }else{
             int cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1);
@@ -2734,7 +2743,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
             if(s->cbp_table[xy] & 8) {
                 change_qscale(s, quant_tab[get_bits(&s->gb, 2)]);
             }
-            s->qscale_table[xy]= s->qscale;
+            s->current_picture.qscale_table[xy]= s->qscale;
 
             s->cbp_table[xy]&= 3; //remove dquant
             s->cbp_table[xy]|= (cbpy^0xf)<<2;
@@ -2801,8 +2810,8 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
 
     mb_type= s->mb_type[xy];
     cbp = s->cbp_table[xy];
-    if(s->qscale_table[xy] != s->qscale){
-        s->qscale= s->qscale_table[xy];
+    if(s->current_picture.qscale_table[xy] != s->qscale){
+        s->qscale= s->current_picture.qscale_table[xy];
         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
     }
@@ -3054,7 +3063,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
         }
 
         /* if we skipped it in the future P Frame than skip it now too */
-        s->mb_skiped= s->mbskip_table[s->mb_y * s->mb_width + s->mb_x]; // Note, skiptab=0 if last was GMC
+        s->mb_skiped= s->next_picture.mbskip_table[s->mb_y * s->mb_width + s->mb_x]; // Note, skiptab=0 if last was GMC
 
         if(s->mb_skiped){
             /* skip mb */
@@ -3287,7 +3296,7 @@ end:
     /* per-MB end of slice check */
     if(s->codec_id==CODEC_ID_MPEG4){
         if(mpeg4_is_resync(s)){
-            if(s->pict_type==B_TYPE && s->mbskip_table[s->mb_y * s->mb_width + s->mb_x+1])
+            if(s->pict_type==B_TYPE && s->next_picture.mbskip_table[s->mb_y * s->mb_width + s->mb_x+1])
                 return SLICE_OK;
             return SLICE_END;
         }
@@ -4441,7 +4450,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
                            - ROUNDED_DIV(s->last_non_b_time - s->pp_time, s->t_frame))*2;
     }
 
-    s->avctx->pts= s->time*1000LL*1000LL / s->time_increment_resolution;
+    s->current_picture.pts= s->time*1000LL*1000LL / s->time_increment_resolution;
 
     if(check_marker(gb, "before vop_coded")==0 && s->picture_number==0){
         printf("hmm, seems the headers arnt complete, trying to guess time_increment_bits\n");
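The "skipping of MBs in B frames" item works the same way in both the encoder and decoder paths touched above: the per-picture skip map of the future reference is consulted through s->next_picture rather than a context-level table. A hedged sketch of that check; the trimmed structs and the helper name are again stand-ins for illustration, not the real definitions:

#include <stdint.h>

/* Trimmed stand-ins; mbskip_table is the per-picture skip map. */
typedef struct Picture {
    uint8_t *mbskip_table;   /* 1 where the macroblock was skipped */
} Picture;

typedef struct MpegEncContext {
    Picture next_picture;    /* the future reference (next P-frame) */
    int     mb_x, mb_y, mb_width;
    int     skip_count;
} MpegEncContext;

/* Sketch of the test added in mpeg4_encode_mb()/ff_h263_decode_mb(): a B-frame
 * macroblock is skipped when the co-located macroblock of the next P-frame was
 * skipped, so nothing needs to be coded or decoded for it. */
static int b_frame_mb_is_skipped(MpegEncContext *s)
{
    const int mb_xy = s->mb_y * s->mb_width + s->mb_x;

    if (s->next_picture.mbskip_table[mb_xy]) {
        s->skip_count++;     /* encoder-side bookkeeping, as in the patch */
        return 1;            /* caller emits/decodes nothing for this MB */
    }
    return 0;
}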