author      Michael Niedermayer <michaelni@gmx.at>    2003-07-29 02:09:12 +0000
committer   Michael Niedermayer <michaelni@gmx.at>    2003-07-29 02:09:12 +0000
commit      7d1c3fc1d683d53982a1dec14f0f9749a07ae48d (patch)
tree        23a9984eef372f9710641e2cb6f302ea9af1727f /libavcodec/mpegvideo.c
parent      80adda8efd35c920c7ba0c43d530102a2fd22bc3 (diff)
download    ffmpeg-7d1c3fc1d683d53982a1dec14f0f9749a07ae48d.tar.gz
rate distortion mb decision support
fix decoding of old %16!=0 divx
fix assertion failure in motion_est.c

Originally committed as revision 2094 to svn://svn.ffmpeg.org/ffmpeg/trunk
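
The heart of the change is encode_mb_hq(): when avctx->mb_decision is FF_MB_DECISION_RD, each candidate macroblock type is additionally decoded with MPV_decode_mb() and scored by a Lagrangian rate-distortion cost instead of by bit count alone, and the cheapest candidate wins. A rough restatement of that cost as a standalone helper (mb_rd_cost is a hypothetical name, not part of the patch; the weights 109 and <<7 are the ones the diff below uses):

#include <stdint.h>

/* hypothetical helper, not in the patch: the cost that encode_mb_hq()
   minimises per macroblock when avctx->mb_decision == FF_MB_DECISION_RD */
static inline int64_t mb_rd_cost(int bits, int qscale, int sse)
{
    /* lambda grows roughly with qscale^2; the SSE term is the squared error
       between the source macroblock and its reconstruction after decoding */
    return (int64_t)bits * qscale * qscale * 109 + ((int64_t)sse << 7);
}
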
Diffstat (limited to 'libavcodec/mpegvideo.c')
-rw-r--r--   libavcodec/mpegvideo.c | 221
1 file changed, 116 insertions(+), 105 deletions(-)
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index af6dd611e5..6a3b5c7185 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -50,6 +50,7 @@ static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
#ifdef CONFIG_ENCODERS
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
+static int sse_mb(MpegEncContext *s);
#endif //CONFIG_ENCODERS
#ifdef HAVE_XVMC
@@ -2425,50 +2426,6 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
else if (s->h263_pred || s->h263_aic)
s->mbintra_table[mb_xy]=1;
- /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */
- if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format specific stuff shouldnt be here
- //FIXME a lot of thet is only needed for !low_delay
- const int wrap = s->block_wrap[0];
- const int xy = s->block_index[0];
- if(s->mv_type != MV_TYPE_8X8){
- int motion_x, motion_y;
- if (s->mb_intra) {
- motion_x = 0;
- motion_y = 0;
- } else if (s->mv_type == MV_TYPE_16X16) {
- motion_x = s->mv[0][0][0];
- motion_y = s->mv[0][0][1];
- } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
- int i;
- motion_x = s->mv[0][0][0] + s->mv[0][1][0];
- motion_y = s->mv[0][0][1] + s->mv[0][1][1];
- motion_x = (motion_x>>1) | (motion_x&1);
- for(i=0; i<2; i++){
- s->field_mv_table[mb_xy][i][0]= s->mv[0][i][0];
- s->field_mv_table[mb_xy][i][1]= s->mv[0][i][1];
- s->field_select_table[mb_xy][i]= s->field_select[0][i];
- }
- }
-
- /* no update if 8X8 because it has been done during parsing */
- s->motion_val[xy][0] = motion_x;
- s->motion_val[xy][1] = motion_y;
- s->motion_val[xy + 1][0] = motion_x;
- s->motion_val[xy + 1][1] = motion_y;
- s->motion_val[xy + wrap][0] = motion_x;
- s->motion_val[xy + wrap][1] = motion_y;
- s->motion_val[xy + 1 + wrap][0] = motion_x;
- s->motion_val[xy + 1 + wrap][1] = motion_y;
- }
-
- if(s->encoding){ //FIXME encoding MUST be cleaned up
- if (s->mv_type == MV_TYPE_8X8)
- s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
- else
- s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
- }
- }
-
if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
uint8_t *dest_y, *dest_cb, *dest_cr;
int dct_linesize, dct_offset;
@@ -2478,10 +2435,6 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
const int uvlinesize= s->current_picture.linesize[1];
/* avoid copy if macroblock skipped in last frame too */
- if (s->pict_type != B_TYPE) {
- s->current_picture.mbskip_table[mb_xy]= s->mb_skiped;
- }
-
/* skip only during decoding as we might trash the buffers during encoding a bit */
if(!s->encoding){
uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
@@ -2506,17 +2459,6 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
} else{
*mbskip_ptr = 0; /* not skipped */
}
- }else
- s->mb_skiped= 0;
-
- if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){ //FIXME precalc
- dest_y = s->current_picture.data[0] + mb_x * 16;
- dest_cb = s->current_picture.data[1] + mb_x * 8;
- dest_cr = s->current_picture.data[2] + mb_x * 8;
- }else{
- dest_y = s->current_picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
- dest_cb = s->current_picture.data[1] + (mb_y * 8 * uvlinesize) + mb_x * 8;
- dest_cr = s->current_picture.data[2] + (mb_y * 8 * uvlinesize) + mb_x * 8;
}
if (s->interlaced_dct) {
@@ -2526,11 +2468,15 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
dct_linesize = linesize;
dct_offset = linesize * 8;
}
+
+ dest_y= s->dest[0];
+ dest_cb= s->dest[1];
+ dest_cr= s->dest[2];
if (!s->mb_intra) {
/* motion handling */
/* decoding or more than one mb_type (MC was allready done otherwise) */
- if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){
+ if(!s->encoding){
if ((!s->no_rounding) || s->pict_type==B_TYPE){
op_pix = s->dsp.put_pixels_tab;
op_qpix= s->dsp.put_qpel_pixels_tab;
@@ -2791,6 +2737,28 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
}
}
+void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
+ const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
+ const int uvlinesize= s->current_picture.linesize[1];
+
+ s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
+ s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1) + s->mb_x*2;
+ s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1 + s->mb_x*2;
+ s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2) + s->mb_x*2;
+ s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
+ s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
+
+ if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){
+ s->dest[0] = s->current_picture.data[0] + s->mb_x * 16 - 16;
+ s->dest[1] = s->current_picture.data[1] + s->mb_x * 8 - 8;
+ s->dest[2] = s->current_picture.data[2] + s->mb_x * 8 - 8;
+ }else{
+ s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16 - 16;
+ s->dest[1] = s->current_picture.data[1] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
+ s->dest[2] = s->current_picture.data[2] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
+ }
+}
+
#ifdef CONFIG_ENCODERS
static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
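
The new ff_init_block_index() above is meant to be called once per macroblock row; within the row, ff_update_block_index() (assumed here to advance block_index[] and s->dest[] by one macroblock, which is how the encoder loop further down uses it) steps to the current macroblock. A minimal sketch of that calling pattern, mirroring the encode_picture() changes below:

for(mb_y=0; mb_y < s->mb_height; mb_y++){
    s->mb_x= 0;
    s->mb_y= mb_y;
    ff_init_block_index(s);        /* per row: base block indices and dest pointers */
    for(mb_x=0; mb_x < s->mb_width; mb_x++){
        s->mb_x= mb_x;
        ff_update_block_index(s);  /* per MB: step one macroblock to the right */
        /* ... encode or decode the macroblock at (mb_x, mb_y) ... */
    }
}
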
@@ -2885,9 +2853,9 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
int wrap_y, wrap_c;
int emu=0;
- dest_y = s->current_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
- dest_cb = s->current_picture.data[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
- dest_cr = s->current_picture.data[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
+ dest_y = s->dest[0];
+ dest_cb = s->dest[1];
+ dest_cr = s->dest[2];
wrap_y = s->linesize;
wrap_c = s->uvlinesize;
ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
@@ -3161,7 +3129,7 @@ static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext
d->misc_bits= s->misc_bits;
d->last_bits= 0;
- d->mb_skiped= s->mb_skiped;
+ d->mb_skiped= 0;
d->qscale= s->qscale;
}
@@ -3206,7 +3174,8 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
int *dmin, int *next_block, int motion_x, int motion_y)
{
- int bits_count;
+ int score;
+ uint8_t *dest_backup[3];
copy_context_before_encode(s, backup, type);
@@ -3216,24 +3185,44 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
s->pb2 = pb2 [*next_block];
s->tex_pb= tex_pb[*next_block];
}
+
+ if(*next_block){
+ memcpy(dest_backup, s->dest, sizeof(s->dest));
+ s->dest[0] = s->me.scratchpad;
+ s->dest[1] = s->me.scratchpad + 16;
+ s->dest[2] = s->me.scratchpad + 16 + 8;
+ assert(2*s->uvlinesize == s->linesize); //should be no prob for encoding
+ assert(s->linesize >= 64); //FIXME
+ }
encode_mb(s, motion_x, motion_y);
-
- bits_count= get_bit_count(&s->pb);
+
+ score= get_bit_count(&s->pb);
if(s->data_partitioning){
- bits_count+= get_bit_count(&s->pb2);
- bits_count+= get_bit_count(&s->tex_pb);
+ score+= get_bit_count(&s->pb2);
+ score+= get_bit_count(&s->tex_pb);
}
+
+ if(s->avctx->mb_decision == FF_MB_DECISION_RD){
+ MPV_decode_mb(s, s->block);
- if(bits_count<*dmin){
- *dmin= bits_count;
+ score *= s->qscale * s->qscale * 109;
+ score += sse_mb(s) << 7;
+ }
+
+ if(*next_block){
+ memcpy(s->dest, dest_backup, sizeof(s->dest));
+ }
+
+ if(score<*dmin){
+ *dmin= score;
*next_block^=1;
copy_context_after_encode(best, s, type);
}
}
-static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
+static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
uint32_t *sq = squareTbl + 256;
int acc=0;
int x,y;
@@ -3254,6 +3243,23 @@ static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, in
return acc;
}
+static int sse_mb(MpegEncContext *s){
+ int w= 16;
+ int h= 16;
+
+ if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
+ if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
+
+ if(w==16 && h==16)
+ return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize)
+ +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize)
+ +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize);
+ else
+ return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
+ +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
+ +sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
+}
+
static void encode_picture(MpegEncContext *s, int picture_number)
{
int mb_x, mb_y, pdif = 0;
@@ -3508,29 +3514,21 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->first_slice_line = 1;
s->ptr_lastgob = s->pb.buf;
for(mb_y=0; mb_y < s->mb_height; mb_y++) {
+ s->mb_x=0;
+ s->mb_y= mb_y;
+
s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
+ ff_init_block_index(s);
- s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
- s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
- s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
- s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
- s->block_index[4]= s->block_wrap[4]*(mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2);
- s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
for(mb_x=0; mb_x < s->mb_width; mb_x++) {
const int xy= mb_y*s->mb_stride + mb_x;
int mb_type= s->mb_type[xy];
// int d;
- int dmin=10000000;
+ int dmin= INT_MAX;
s->mb_x = mb_x;
- s->mb_y = mb_y;
- s->block_index[0]+=2;
- s->block_index[1]+=2;
- s->block_index[2]+=2;
- s->block_index[3]+=2;
- s->block_index[4]++;
- s->block_index[5]++;
+ ff_update_block_index(s);
/* write gob / video packet header */
#ifdef CONFIG_RISKY
@@ -3588,6 +3586,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->first_slice_line=0;
}
+ s->mb_skiped=0;
+
if(mb_type & (mb_type-1)){ // more than 1 MB type possible
int next_block=0;
int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
@@ -3670,9 +3670,12 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->mv[0][0][1] = 0;
encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb,
&dmin, &next_block, 0, 0);
- /* force cleaning of ac/dc pred stuff if needed ... */
- if(s->h263_pred || s->h263_aic)
- s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
+ if(s->h263_pred || s->h263_aic){
+ if(best_s.mb_intra)
+ s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
+ else
+ ff_clean_intra_table_entries(s); //old mode?
+ }
}
copy_context_after_encode(s, &best_s, -1);
@@ -3693,12 +3696,24 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->tex_pb= backup_s.tex_pb;
}
s->last_bits= get_bit_count(&s->pb);
+
+ if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
+ ff_h263_update_motion_val(s);
+
+ if(next_block==0){
+ s->dsp.put_pixels_tab[0][0](s->dest[0], s->me.scratchpad , s->linesize ,16);
+ s->dsp.put_pixels_tab[1][0](s->dest[1], s->me.scratchpad + 16, s->uvlinesize, 8);
+ s->dsp.put_pixels_tab[1][0](s->dest[2], s->me.scratchpad + 24, s->uvlinesize, 8);
+ }
+
+ if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
+ MPV_decode_mb(s, s->block);
} else {
int motion_x, motion_y;
int intra_score;
int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride];
- if(!(s->flags&CODEC_FLAG_HQ) && s->pict_type==P_TYPE){
+ if(s->avctx->mb_decision==FF_MB_DECISION_SIMPLE && s->pict_type==P_TYPE){ //FIXME check if the mess is usefull at all
/* get luma score */
if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME dont scale it down so we dont have to fix it
@@ -3846,6 +3861,11 @@ static void encode_picture(MpegEncContext *s, int picture_number)
// RAL: Update last macrobloc type
s->last_mv_dir = s->mv_dir;
+
+ if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
+ ff_h263_update_motion_val(s);
+
+ MPV_decode_mb(s, s->block);
}
/* clean the MV table in IPS frames for direct mode in B frames */
@@ -3853,8 +3873,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->p_mv_table[xy][0]=0;
s->p_mv_table[xy][1]=0;
}
-
- MPV_decode_mb(s, s->block);
if(s->flags&CODEC_FLAG_PSNR){
int w= 16;
@@ -3864,20 +3882,14 @@ static void encode_picture(MpegEncContext *s, int picture_number)
if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
s->current_picture_ptr->error[0] += sse(
- s,
- s->new_picture .data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
- s->current_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
- w, h, s->linesize);
+ s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
+ s->dest[0], w, h, s->linesize);
s->current_picture_ptr->error[1] += sse(
- s,
- s->new_picture .data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
- s->current_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
- w>>1, h>>1, s->uvlinesize);
+ s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
+ s->dest[1], w>>1, h>>1, s->uvlinesize);
s->current_picture_ptr->error[2] += sse(
- s,
- s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
- s->current_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
- w>>1, h>>1, s->uvlinesize);
+ s, s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
+ s->dest[2], w>>1, h>>1, s->uvlinesize);
}
//printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
}
@@ -4402,7 +4414,6 @@ static void dct_unquantize_h263_c(MpegEncContext *s,
static const AVOption mpeg4_options[] =
{
AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
- AVOPTION_CODEC_FLAG("vhq", "very high quality", flags, CODEC_FLAG_HQ, 0),
AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference"
"the reference can be CBR (for CBR pass1) or VBR (for pass2)",
bit_rate_tolerance, 4, 240000000, 8000),
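
With the "vhq" option removed above, rate-distortion macroblock decision is selected through AVCodecContext.mb_decision rather than CODEC_FLAG_HQ. A minimal usage sketch under that assumption (alloc_rd_encoder_context is a hypothetical wrapper; avcodec_alloc_context() reflects the libavcodec API of this era, and error handling beyond the NULL check is omitted):

#include "avcodec.h"

/* minimal sketch: enabling rate-distortion macroblock decision through
   mb_decision instead of the removed "vhq"/CODEC_FLAG_HQ switch */
static AVCodecContext *alloc_rd_encoder_context(void)
{
    AVCodecContext *avctx= avcodec_alloc_context();
    if(avctx)
        avctx->mb_decision= FF_MB_DECISION_RD; /* SIMPLE and BITS remain the cheaper modes */
    return avctx;
}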