author    | Michael Niedermayer <michaelni@gmx.at> | 2003-03-16 20:22:22 +0000
committer | Michael Niedermayer <michaelni@gmx.at> | 2003-03-16 20:22:22 +0000
commit    | b536d0aad2750d3a5f24520fccf1e48a46cad53b (patch)
tree      | ef8d69520065d3c1009481e77b0ff8fd64b58959
parent    | 1c6dcb0f420c747b263d5c17641ae9c216c2e9de (diff)
download  | ffmpeg-b536d0aad2750d3a5f24520fccf1e48a46cad53b.tar.gz
field pic decoding cleanup
Originally committed as revision 1686 to svn://svn.ffmpeg.org/ffmpeg/trunk
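Note on the pattern this cleanup centralizes: for a field picture the decoder now reuses the frame buffer of the current picture, offsetting each plane pointer by one line for a bottom field and doubling the per-plane line size once in MPV_frame_start(), instead of patching s->linesize around every macroblock in the MPEG-1/2 slice loop. A rough standalone sketch of that plane setup follows (plain C; the bare data/linesize arrays are hypothetical stand-ins, not the real MpegEncContext/Picture fields):

    #include <stdint.h>

    /* picture_structure codes as used by MPEG-2 and mpegvideo.h */
    #define PICT_TOP_FIELD     1
    #define PICT_BOTTOM_FIELD  2
    #define PICT_FRAME         3

    /* Sketch: point a working copy of the picture at the right field.
     * A bottom field starts one line into the frame buffer, and both
     * fields advance two frame lines per field line, so the data
     * pointers are offset and the line sizes doubled once, up front. */
    static void setup_field_planes(uint8_t *data[4], int linesize[4],
                                   int picture_structure)
    {
        int i;

        if (picture_structure == PICT_FRAME)
            return;                      /* progressive frame: nothing to do */

        for (i = 0; i < 4; i++) {
            if (!data[i])
                continue;
            if (picture_structure == PICT_BOTTOM_FIELD)
                data[i] += linesize[i];  /* bottom field begins on line 1 */
            linesize[i] *= 2;            /* step over the other field's lines */
        }
    }

With this in place, the second field of a frame only has to re-point s->current_picture.data[] at the already allocated buffer (see the mpeg12.c hunk below) rather than adjusting linesize around each macroblock.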
-rw-r--r-- | libavcodec/avcodec.h          |  10
-rw-r--r-- | libavcodec/error_resilience.c |   2
-rw-r--r-- | libavcodec/h263dec.c          |   6
-rw-r--r-- | libavcodec/mpeg12.c           |  39
-rw-r--r-- | libavcodec/mpegvideo.c        | 143
-rw-r--r-- | libavcodec/mpegvideo.h        |  31
-rw-r--r-- | libavcodec/svq1.c             |   2
-rw-r--r-- | libavcodec/utils.c            |   6
8 files changed, 146 insertions, 93 deletions
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 47fc5efa15..42d69c9912 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -187,6 +187,12 @@ static const int Motion_Est_QTab[] = { ME_ZERO, ME_PHODS, ME_LOG,
 #define CODEC_CAP_TRUNCATED       0x0008
 
 #define FF_COMMON_FRAME \
+    /**\
+     * pointer to the picture planes.\
+     * this might be different from the first allocated byte\
+     * - encoding: \
+     * - decoding: \
+     */\
     uint8_t *data[4];\
     int linesize[4];\
     /**\
@@ -306,8 +312,8 @@ static const int Motion_Est_QTab[] = { ME_ZERO, ME_PHODS, ME_LOG,
 
 #define FF_BUFFER_TYPE_INTERNAL 1
-#define FF_BUFFER_TYPE_USER     2 // Direct rendering buffers
-#define FF_BUFFER_TYPE_SHARED   4 // input frame for encoding(wont be dealloced)
+#define FF_BUFFER_TYPE_USER     2 ///< Direct rendering buffers
+#define FF_BUFFER_TYPE_SHARED   4 ///< buffer from somewher else, dont dealloc
 
 #define FF_I_TYPE 1 // Intra
diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
index d58cfb0304..bf399634e6 100644
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@ -534,7 +534,7 @@ score_sum+= best_score;
 static int is_intra_more_likely(MpegEncContext *s){
     int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
 
-    if(s->last_picture.data[0]==NULL) return 1; //no previous frame available -> use spatial prediction
+    if(s->last_picture_ptr==NULL) return 1; //no previous frame available -> use spatial prediction
 
     undamaged_count=0;
     for(i=0; i<s->mb_num; i++){
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index bd0f7ff97d..bab06c9bca 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -621,7 +621,7 @@ retry:
     s->current_picture.key_frame= s->pict_type == I_TYPE;
 
     /* skip b frames if we dont have reference frames */
-    if(s->last_picture.data[0]==NULL && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size);
+    if(s->last_picture_ptr==NULL && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size);
     /* skip b frames if we are in a hurry */
     if(avctx->hurry_up && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size);
     /* skip everything if we are in a hurry>=5 */
@@ -731,7 +731,7 @@ retry:
 
     MPV_frame_end(s);
 
-    if((avctx->debug&FF_DEBUG_VIS_MV) && s->last_picture.data[0]){
+    if((avctx->debug&FF_DEBUG_VIS_MV) && s->last_picture_ptr){
         const int shift= 1 + s->quarter_sample;
         int mb_y;
         uint8_t *ptr= s->last_picture.data[0];
@@ -789,7 +789,7 @@ retry:
     avctx->frame_number = s->picture_number - 1;
 
     /* dont output the last pic after seeking */
-    if(s->last_picture.data[0] || s->low_delay)
+    if(s->last_picture_ptr || s->low_delay)
         *data_size = sizeof(AVFrame);
 #ifdef PRINT_FRAME_TIME
     printf("%Ld\n", rdtsc()-time);
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index 65292ac430..37d9243029 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -229,7 +229,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
         put_bits(&s->pb, 4, s->aspect_ratio_info);
         put_bits(&s->pb, 4, s->frame_rate_index);
-        v = s->bit_rate / 400;
+        v = (s->bit_rate + 399) / 400;
         if (v > 0x3ffff)
             v = 0x3ffff;
         put_bits(&s->pb, 18, v);
@@ -1803,7 +1803,8 @@ static int mpeg_decode_slice(AVCodecContext *avctx,
     memset(s->last_mv, 0, sizeof(s->last_mv));
 
     /* start frame decoding */
-    if (s->first_slice && (s->first_field || s->picture_structure==PICT_FRAME)) {
+    if (s->first_slice) {
+        if(s->first_field || s->picture_structure==PICT_FRAME){
         if(MPV_frame_start(s, avctx) < 0)
             return DECODE_SLICE_FATAL_ERROR;
 
         /* first check if we must repeat the frame */
@@ -1829,6 +1830,15 @@ static int mpeg_decode_slice(AVCodecContext *avctx,
                     s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors,
                     s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :"");
             }
+        }else{ //second field
+            int i;
+            for(i=0; i<4; i++){
+                s->current_picture.data[i] = s->current_picture_ptr->data[i];
+                if(s->picture_structure == PICT_BOTTOM_FIELD){
+                    s->current_picture.data[i] += s->current_picture_ptr->linesize[i];
+                }
+            }
+        }
     }
 
     s->first_slice = 0;
@@ -1865,27 +1875,8 @@ static int mpeg_decode_slice(AVCodecContext *avctx,
             dprintf("ret=%d\n", ret);
             if (ret < 0)
                 return -1;
-//printf("%d %d\n", s->mb_x, s->mb_y);
-            //FIXME this isnt the most beautifull way to solve the problem ...
-            if(s->picture_structure!=PICT_FRAME){
-                if(s->picture_structure == PICT_BOTTOM_FIELD){
-                    s->current_picture.data[0] += s->linesize;
-                    s->current_picture.data[1] += s->uvlinesize;
-                    s->current_picture.data[2] += s->uvlinesize;
-                }
-                s->linesize *= 2;
-                s->uvlinesize *= 2;
-            }
+
             MPV_decode_mb(s, s->block);
-            if(s->picture_structure!=PICT_FRAME){
-                s->linesize /= 2;
-                s->uvlinesize /= 2;
-                if(s->picture_structure == PICT_BOTTOM_FIELD){
-                    s->current_picture.data[0] -= s->linesize;
-                    s->current_picture.data[1] -= s->uvlinesize;
-                    s->current_picture.data[2] -= s->uvlinesize;
-                }
-            }
 
         if (++s->mb_x >= s->mb_width) {
             if(s->picture_structure==PICT_FRAME){
@@ -1945,7 +1936,7 @@ eos: //end of slice
         s->picture_number++;
         /* latency of 1 frame for I and P frames */
         /* XXX: use another variable than picture_number */
-        if (s->last_picture.data[0] == NULL) {
+        if (s->last_picture_ptr == NULL) {
             return DECODE_SLICE_OK;
         } else {
             *pict= *(AVFrame*)&s->last_picture;
@@ -2196,7 +2187,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
                    start_code <= SLICE_MAX_START_CODE) {
 
                     /* skip b frames if we dont have reference frames */
-                    if(s2->last_picture.data[0]==NULL && s2->pict_type==B_TYPE) break;
+                    if(s2->last_picture_ptr==NULL && s2->pict_type==B_TYPE) break;
                     /* skip b frames if we are in a hurry */
                     if(avctx->hurry_up && s2->pict_type==B_TYPE) break;
                     /* skip everything if we are in a hurry>=5 */
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index ca0169509f..602063e49f 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -858,47 +858,58 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     s->mb_skiped = 0;
 
     /* mark&release old frames */
-    if (s->pict_type != B_TYPE && s->last_picture.data[0]) {
-        for(i=0; i<MAX_PICTURE_COUNT; i++){
-//printf("%8X %d %d %X %X\n", s->picture[i].data[0], s->picture[i].type, i, s->next_picture.data[0], s->last_picture.data[0]);
-            if(s->picture[i].data[0] == s->last_picture.data[0]){
-//                s->picture[i].reference=0;
-                avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
-                break;
-            }
-        }
-        assert(i<MAX_PICTURE_COUNT);
+    if (s->pict_type != B_TYPE && s->last_picture_ptr) {
+        avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
 
         /* release forgotten pictures */
         /* if(mpeg124/h263) */
         if(!s->encoding){
             for(i=0; i<MAX_PICTURE_COUNT; i++){
-                if(s->picture[i].data[0] && s->picture[i].data[0] != s->next_picture.data[0] && s->picture[i].reference){
+                if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                     fprintf(stderr, "releasing zombie picture\n");
                     avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
                 }
            }
        }
    }
+
alloc:
    if(!s->encoding){
        i= find_unused_picture(s, 0);
 
        pic= (AVFrame*)&s->picture[i];
        pic->reference= s->pict_type != B_TYPE;
-        pic->coded_picture_number= s->current_picture.coded_picture_number+1;
+
+        if(s->current_picture_ptr)
+            pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;
 
        alloc_picture(s, (Picture*)pic, 0);
 
-        s->current_picture= s->picture[i];
+        s->current_picture_ptr= &s->picture[i];
    }
 
    if (s->pict_type != B_TYPE) {
-        s->last_picture= s->next_picture;
-        s->next_picture= s->current_picture;
+        s->last_picture_ptr= s->next_picture_ptr;
+        s->next_picture_ptr= s->current_picture_ptr;
+    }
+    s->current_picture= *s->current_picture_ptr;
+    if(s->last_picture_ptr) s->last_picture= *s->last_picture_ptr;
+    if(s->next_picture_ptr) s->next_picture= *s->next_picture_ptr;
+    if(s->new_picture_ptr ) s->new_picture = *s->new_picture_ptr;
+
+    if(s->picture_structure!=PICT_FRAME){
+        int i;
+        for(i=0; i<4; i++){
+            if(s->picture_structure == PICT_BOTTOM_FIELD){
+                s->current_picture.data[i] += s->current_picture.linesize[i];
+            }
+            s->current_picture.linesize[i] *= 2;
+            s->last_picture.linesize[i] *=2;
+            s->next_picture.linesize[i] *=2;
+        }
    }
 
-    if(s->pict_type != I_TYPE && s->last_picture.data[0]==NULL){
+    if(s->pict_type != I_TYPE && s->last_picture_ptr==NULL){
        fprintf(stderr, "warning: first frame is no keyframe\n");
        assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference
        goto alloc;
@@ -938,12 +949,8 @@ void MPV_frame_end(MpegEncContext *s)
     if(s->pict_type!=B_TYPE){
         s->last_non_b_pict_type= s->pict_type;
     }
-
-    s->current_picture.quality= s->qscale; //FIXME get average of qscale_table
-    s->current_picture.pict_type= s->pict_type;
-    s->current_picture.key_frame= s->pict_type == I_TYPE;
-
-    /* copy back current_picture variables */
+#if 0
+    /* copy back current_picture variables */
     for(i=0; i<MAX_PICTURE_COUNT; i++){
         if(s->picture[i].data[0] == s->current_picture.data[0]){
             s->picture[i]= s->current_picture;
@@ -951,6 +958,10 @@ void MPV_frame_end(MpegEncContext *s)
         }
     }
     assert(i<MAX_PICTURE_COUNT);
+#endif
+    s->current_picture_ptr->quality= s->qscale; //FIXME get average of qscale_table
+    s->current_picture_ptr->pict_type= s->pict_type;
+    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
 
     /* release non refernce frames */
     for(i=0; i<MAX_PICTURE_COUNT; i++){
@@ -969,6 +980,13 @@ void MPV_frame_end(MpegEncContext *s)
         }
         printf("pict type: %d\n", s->pict_type);
     }
+
+    // clear copies, to avoid confusion
+#if 0
+    memset(&s->last_picture, 0, sizeof(Picture));
+    memset(&s->next_picture, 0, sizeof(Picture));
+    memset(&s->current_picture, 0, sizeof(Picture));
+#endif
 }
 
 #ifdef CONFIG_ENCODERS
@@ -1038,6 +1056,12 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
             pic->reference= 1;
 
             alloc_picture(s, (Picture*)pic, 0);
+            for(i=0; i<4; i++){
+                /* the input will be 16 pixels to the right relative to the actual buffer start
+                 * and the current_pic, so the buffer can be reused, yes its not beatifull
+                 */
+                pic->data[i]+= 16;
+            }
 
             if(   pic->data[0] == pic_arg->data[0]
                && pic->data[1] == pic_arg->data[1]
@@ -1100,7 +1124,7 @@ static void select_input_picture(MpegEncContext *s){
 
     /* set next picture types & ordering */
     if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
-        if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture.data[0]==NULL || s->intra_only){
+        if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
            s->reordered_input_picture[0]= s->input_picture[0];
            s->reordered_input_picture[0]->pict_type= I_TYPE;
            s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
@@ -1174,19 +1198,22 @@ static void select_input_picture(MpegEncContext *s){
     }
 
     if(s->reordered_input_picture[0]){
-        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE;
+        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE;
+
+        s->new_picture= *s->reordered_input_picture[0];
 
         if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
+            // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
+
             int i= find_unused_picture(s, 0);
             Picture *pic= &s->picture[i];
 
-            s->new_picture= *s->reordered_input_picture[0];
-
             /* mark us unused / free shared pic */
             for(i=0; i<4; i++)
                 s->reordered_input_picture[0]->data[i]= NULL;
             s->reordered_input_picture[0]->type= 0;
 
+            //FIXME bad, copy * except
             pic->pict_type = s->reordered_input_picture[0]->pict_type;
             pic->quality   = s->reordered_input_picture[0]->quality;
             pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
@@ -1194,18 +1221,20 @@ static void select_input_picture(MpegEncContext *s){
 
             alloc_picture(s, pic, 0);
 
-            s->current_picture= *pic;
+            s->current_picture_ptr= pic;
         }else{
+            // input is not a shared pix -> reuse buffer for current_pix
+
             assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
                    || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
 
-            s->new_picture= *s->reordered_input_picture[0];
-
+            s->current_picture_ptr= s->reordered_input_picture[0];
             for(i=0; i<4; i++){
-                s->reordered_input_picture[0]->data[i]-=16; //FIXME dirty
+                //reverse the +16 we did before storing the input
+                s->current_picture_ptr->data[i]-=16;
             }
-            s->current_picture= *s->reordered_input_picture[0];
         }
+        s->current_picture= *s->current_picture_ptr;
 
         s->picture_number= s->new_picture.display_picture_number;
//printf("dpn:%d\n", s->picture_number);
@@ -1260,6 +1289,10 @@ int MPV_encode_picture(AVCodecContext *avctx,
 
         if(s->flags&CODEC_FLAG_PASS1)
             ff_write_pass1_stats(s);
+
+        for(i=0; i<4; i++){
+            avctx->error[i] += s->current_picture_ptr->error[i];
+        }
     }
 
     s->input_picture_number++;
@@ -1269,10 +1302,6 @@ int MPV_encode_picture(AVCodecContext *avctx,
     s->total_bits += s->frame_bits;
     avctx->frame_bits = s->frame_bits;
-
-    for(i=0; i<4; i++){
-        avctx->error[i] += s->current_picture.error[i];
-    }
 
     return pbBufPtr(&s->pb) - s->pb.buf;
 }
@@ -1523,15 +1552,15 @@ if(s->quarter_sample)
         src_y = clip(src_y, -16, height);
         if (src_y == height)
             dxy &= ~2;
-    linesize   = s->linesize << field_based;
-    uvlinesize = s->uvlinesize << field_based;
+    linesize   = s->current_picture.linesize[0] << field_based;
+    uvlinesize = s->current_picture.linesize[1] << field_based;
     ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
     dest_y += dest_offset;
 
     if(s->flags&CODEC_FLAG_EMU_EDGE){
         if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos || src_y + (motion_y&1) + h  > v_edge_pos){
-            ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based,
+            ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, //FIXME linesize? and uv below
                              src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
             ptr= s->edge_emu_buffer + src_offset;
             emu=1;
@@ -1864,10 +1893,10 @@ static inline void MPV_motion(MpegEncContext *s,
         } else {
             int offset;
             if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
-                offset= s->field_select[dir][0] ? s->linesize/2 : 0;
+                offset= s->field_select[dir][0] ? s->linesize : 0;
             }else{
                 ref_picture= s->current_picture.data;
-                offset= s->field_select[dir][0] ? s->linesize/2 : -s->linesize/2;
+                offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
             }
 
             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
@@ -2023,6 +2052,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
     int dct_linesize, dct_offset;
     op_pixels_func (*op_pix)[4];
     qpel_mc_func (*op_qpix)[16];
+    const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
+    const int uvlinesize= s->current_picture.linesize[1];
 
     /* avoid copy if macroblock skipped in last frame too */
     if (s->pict_type != B_TYPE) {
@@ -2061,17 +2092,17 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
            dest_cb = s->current_picture.data[1] + mb_x * 8;
            dest_cr = s->current_picture.data[2] + mb_x * 8;
        }else{
-            dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16;
-            dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
-            dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
+            dest_y = s->current_picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
+            dest_cb = s->current_picture.data[1] + (mb_y * 8 * uvlinesize) + mb_x * 8;
+            dest_cr = s->current_picture.data[2] + (mb_y * 8 * uvlinesize) + mb_x * 8;
        }
 
        if (s->interlaced_dct) {
-            dct_linesize = s->linesize * 2;
-            dct_offset = s->linesize;
+            dct_linesize = linesize * 2;
+            dct_offset = linesize;
        } else {
-            dct_linesize = s->linesize;
-            dct_offset = s->linesize * 8;
+            dct_linesize = linesize;
+            dct_offset = linesize * 8;
        }
 
        if (!s->mb_intra) {
@@ -2108,8 +2139,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
 
                if(!(s->flags&CODEC_FLAG_GRAY)){
-                    add_dequant_dct(s, block[4], 4, dest_cb, s->uvlinesize);
-                    add_dequant_dct(s, block[5], 5, dest_cr, s->uvlinesize);
+                    add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize);
+                    add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize);
                }
            } else if(s->codec_id != CODEC_ID_WMV2){
                add_dct(s, block[0], 0, dest_y, dct_linesize);
@@ -2118,8 +2149,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
                add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
 
                if(!(s->flags&CODEC_FLAG_GRAY)){
-                    add_dct(s, block[4], 4, dest_cb, s->uvlinesize);
-                    add_dct(s, block[5], 5, dest_cr, s->uvlinesize);
+                    add_dct(s, block[4], 4, dest_cb, uvlinesize);
+                    add_dct(s, block[5], 5, dest_cr, uvlinesize);
                }
            }
 #ifdef CONFIG_RISKY
@@ -2136,8 +2167,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
                put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
 
                if(!(s->flags&CODEC_FLAG_GRAY)){
-                    put_dct(s, block[4], 4, dest_cb, s->uvlinesize);
-                    put_dct(s, block[5], 5, dest_cr, s->uvlinesize);
+                    put_dct(s, block[4], 4, dest_cb, uvlinesize);
+                    put_dct(s, block[5], 5, dest_cr, uvlinesize);
                }
            }else{
                s->dsp.idct_put(dest_y, dct_linesize, block[0]);
@@ -2146,8 +2177,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
                s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
 
                if(!(s->flags&CODEC_FLAG_GRAY)){
-                    s->dsp.idct_put(dest_cb, s->uvlinesize, block[4]);
-                    s->dsp.idct_put(dest_cr, s->uvlinesize, block[5]);
+                    s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
+                    s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                }
            }
        }
@@ -2300,7 +2331,7 @@ static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move
  */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
     if (    s->avctx->draw_horiz_band
-        && (s->last_picture.data[0] || s->low_delay) ) {
+        && (s->last_picture_ptr || s->low_delay) ) {
         uint8_t *src_ptr[3];
         int offset;
         h= FFMIN(h, s->height - y);
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 51d57e9ad4..846d1ab3c0 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -238,10 +238,35 @@ typedef struct MpegEncContext {
     Picture picture[MAX_PICTURE_COUNT];   ///< main picture buffer
     Picture *input_picture[MAX_PICTURE_COUNT]; ///< next pictures on display order for encoding
     Picture *reordered_input_picture[MAX_PICTURE_COUNT]; ///< pointer to the next pictures in codedorder for encoding
-    Picture last_picture;       ///< previous picture
-    Picture next_picture;       ///< previous picture (for bidir pred)
-    Picture new_picture;        ///< source picture for encoding
+
+    /**
+     * copy of the previous picture structure.
+     * note, linesize & data, might not match the previous picture (for field pictures)
+     */
+    Picture last_picture;
+
+    /**
+     * copy of the next picture structure.
+     * note, linesize & data, might not match the next picture (for field pictures)
+     */
+    Picture next_picture;
+
+    /**
+     * copy of the source picture structure for encoding.
+     * note, linesize & data, might not match the source picture (for field pictures)
+     */
+    Picture new_picture;
+
+    /**
+     * copy of the current picture structure.
+     * note, linesize & data, might not match the current picture (for field pictures)
+     */
     Picture current_picture;    ///< buffer to store the decompressed current picture
+
+    Picture *last_picture_ptr;     ///< pointer to the previous picture.
+    Picture *next_picture_ptr;     ///< pointer to the next picture (for bidir pred)
+    Picture *new_picture_ptr;      ///< pointer to the source picture for encoding
+    Picture *current_picture_ptr;  ///< pointer to the current picture
     int last_dc[3];             ///< last DC values for MPEG1
     int16_t *dc_val[3];         ///< used for mpeg4 DC prediction, all 3 arrays must be continuous
     int y_dc_scale, c_dc_scale;
diff --git a/libavcodec/svq1.c b/libavcodec/svq1.c
index ab2f8e855f..dbebde0f7d 100644
--- a/libavcodec/svq1.c
+++ b/libavcodec/svq1.c
@@ -1126,7 +1126,7 @@ static int svq1_decode_frame(AVCodecContext *avctx,
 
   //FIXME this avoids some confusion for "B frames" without 2 references
   //this should be removed after libavcodec can handle more flaxible picture types & ordering
-  if(s->pict_type==B_TYPE && s->last_picture.data[0]==NULL) return buf_size;
+  if(s->pict_type==B_TYPE && s->last_picture_ptr==NULL) return buf_size;
 
   if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size;
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index 29efd04c38..a8577be086 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -192,9 +192,9 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
         memset(pic->base[i], 128, pic->linesize[i]*h>>v_shift);
 
         if(s->flags&CODEC_FLAG_EMU_EDGE)
-            pic->data[i] = pic->base[i] + 16; //FIXME 16
+            pic->data[i] = pic->base[i];
         else
-            pic->data[i] = pic->base[i] + (pic->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift) + 16; //FIXME 16
+            pic->data[i] = pic->base[i] + (pic->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift);
 
         opaque->data[i]= pic->data[i];
     }
@@ -581,7 +581,7 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
            || s->picture[i].type == FF_BUFFER_TYPE_USER))
                avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
        }
-        s->last_picture.data[0] = s->next_picture.data[0] = NULL;
+        s->last_picture_ptr = s->next_picture_ptr = NULL;
        break;
    default:
        //FIXME