author     Diego Biurrun <diego@biurrun.de>  2011-07-06 20:08:30 +0200
committer  Diego Biurrun <diego@biurrun.de>  2011-07-11 00:19:00 +0200
commit     657ccb5ac75ce34e62bd67f228d9bd36db72189e (patch)
tree       e490b0d6b4ff93490f70d4f67a0551dd187147e3 /libavcodec/mpeg12.c
parent     142e76f1055de5dde44696e71a5f63f2cb11dedf (diff)
Eliminate FF_COMMON_FRAME macro.
FF_COMMON_FRAME holds the contents of the AVFrame structure and is also copied to struct Picture. Replace by an embedded AVFrame structure in struct Picture.
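
For context, the structural change itself lives in struct Picture (presumably libavcodec/mpegvideo.h) rather than in this file. The sketch below is illustrative, not the verbatim header, but it shows why every access in this diff gains an extra ".f." level:

    /* Before: the macro pasted AVFrame's members straight into Picture,
     * so call sites addressed them as s->current_picture.mb_type etc. */
    typedef struct Picture {
        FF_COMMON_FRAME
        /* ... mpegvideo-specific fields ... */
    } Picture;

    /* After: Picture embeds an AVFrame named 'f'; the same members are
     * now reached through it, e.g. s->current_picture.f.mb_type. */
    typedef struct Picture {
        struct AVFrame f;
        /* ... mpegvideo-specific fields ... */
    } Picture;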
Diffstat (limited to 'libavcodec/mpeg12.c')
-rw-r--r--  libavcodec/mpeg12.c  |  50
1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index 986cad5fd2..4cbfa8d636 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -221,20 +221,20 @@ static int mpeg_decode_mb(MpegEncContext *s,
if (s->mb_skip_run-- != 0) {
if (s->pict_type == AV_PICTURE_TYPE_P) {
s->mb_skipped = 1;
- s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
+ s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
} else {
int mb_type;
if(s->mb_x)
- mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1];
+ mb_type = s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
else
- mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
+ mb_type = s->current_picture.f.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
if(IS_INTRA(mb_type))
return -1;
- s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
+ s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
mb_type | MB_TYPE_SKIP;
-// assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
+// assert(s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
s->mb_skipped = 1;
@@ -577,7 +577,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
}
}
- s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;
+ s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
return 0;
}
@@ -1385,8 +1385,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[1][0] = f_code;
s->mpeg_f_code[1][1] = f_code;
}
- s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+ s->current_picture.f.pict_type = s->pict_type;
+ s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
if(avctx->debug & FF_DEBUG_PICT_INFO)
av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
@@ -1539,8 +1539,8 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
s->pict_type= AV_PICTURE_TYPE_P;
}else
s->pict_type= AV_PICTURE_TYPE_B;
- s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+ s->current_picture.f.pict_type = s->pict_type;
+ s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
}
s->intra_dc_precision = get_bits(&s->gb, 2);
s->picture_structure = get_bits(&s->gb, 2);
@@ -1618,19 +1618,19 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
ff_er_frame_start(s);
/* first check if we must repeat the frame */
- s->current_picture_ptr->repeat_pict = 0;
+ s->current_picture_ptr->f.repeat_pict = 0;
if (s->repeat_first_field) {
if (s->progressive_sequence) {
if (s->top_field_first)
- s->current_picture_ptr->repeat_pict = 4;
+ s->current_picture_ptr->f.repeat_pict = 4;
else
- s->current_picture_ptr->repeat_pict = 2;
+ s->current_picture_ptr->f.repeat_pict = 2;
} else if (s->progressive_frame) {
- s->current_picture_ptr->repeat_pict = 1;
+ s->current_picture_ptr->f.repeat_pict = 1;
}
}
- *s->current_picture_ptr->pan_scan= s1->pan_scan;
+ *s->current_picture_ptr->f.pan_scan = s1->pan_scan;
if (HAVE_PTHREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_finish_setup(avctx);
@@ -1643,9 +1643,9 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
}
for(i=0; i<4; i++){
- s->current_picture.data[i] = s->current_picture_ptr->data[i];
+ s->current_picture.f.data[i] = s->current_picture_ptr->f.data[i];
if(s->picture_structure == PICT_BOTTOM_FIELD){
- s->current_picture.data[i] += s->current_picture_ptr->linesize[i];
+ s->current_picture.f.data[i] += s->current_picture_ptr->f.linesize[i];
}
}
}
@@ -1767,7 +1767,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
if(mpeg_decode_mb(s, s->block) < 0)
return -1;
- if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs
+ if (s->current_picture.f.motion_val[0] && !s->encoding) { //note motion_val is normally NULL unless we want to extract the MVs
const int wrap = s->b8_stride;
int xy = s->mb_x*2 + s->mb_y*2*wrap;
int b8_xy= 4*(s->mb_x + s->mb_y*s->mb_stride);
@@ -1785,12 +1785,12 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
motion_y = s->mv[dir][i][1];
}
- s->current_picture.motion_val[dir][xy ][0] = motion_x;
- s->current_picture.motion_val[dir][xy ][1] = motion_y;
- s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
- s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
- s->current_picture.ref_index [dir][b8_xy ]=
- s->current_picture.ref_index [dir][b8_xy + 1]= s->field_select[dir][i];
+ s->current_picture.f.motion_val[dir][xy ][0] = motion_x;
+ s->current_picture.f.motion_val[dir][xy ][1] = motion_y;
+ s->current_picture.f.motion_val[dir][xy + 1][0] = motion_x;
+ s->current_picture.f.motion_val[dir][xy + 1][1] = motion_y;
+ s->current_picture.f.ref_index [dir][b8_xy ] =
+ s->current_picture.f.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1);
}
xy += wrap;
@@ -1954,7 +1954,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
/* end of image */
- s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2;
+ s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_MPEG2;
ff_er_frame_end(s);