about summary refs log tree commit diff stats
path: root/libavcodec/h263.c
diff options
context:
space:
mode:
authorDiego Biurrun <diego@biurrun.de>2011-07-06 20:08:30 +0200
committerDiego Biurrun <diego@biurrun.de>2011-07-11 00:19:00 +0200
commit657ccb5ac75ce34e62bd67f228d9bd36db72189e (patch)
treee490b0d6b4ff93490f70d4f67a0551dd187147e3 /libavcodec/h263.c
parent142e76f1055de5dde44696e71a5f63f2cb11dedf (diff)
downloadffmpeg-657ccb5ac75ce34e62bd67f228d9bd36db72189e.tar.gz
Eliminate FF_COMMON_FRAME macro.
FF_COMMON_FRAME holds the contents of the AVFrame structure and is also copied to struct Picture. Replace by an embedded AVFrame structure in struct Picture.
Diffstat (limited to 'libavcodec/h263.c')
-rw-r--r-- libavcodec/h263.c | 48
1 file changed, 24 insertions, 24 deletions
diff --git a/libavcodec/h263.c b/libavcodec/h263.c
index 5c25df2272..b3c93027ee 100644
--- a/libavcodec/h263.c
+++ b/libavcodec/h263.c
@@ -52,7 +52,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
const int wrap = s->b8_stride;
const int xy = s->block_index[0];
- s->current_picture.mbskip_table[mb_xy]= s->mb_skipped;
+ s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped;
if(s->mv_type != MV_TYPE_8X8){
int motion_x, motion_y;
@@ -71,30 +71,30 @@ void ff_h263_update_motion_val(MpegEncContext * s){
s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
}
- s->current_picture.ref_index[0][4*mb_xy ]=
- s->current_picture.ref_index[0][4*mb_xy + 1]= s->field_select[0][0];
- s->current_picture.ref_index[0][4*mb_xy + 2]=
- s->current_picture.ref_index[0][4*mb_xy + 3]= s->field_select[0][1];
+ s->current_picture.f.ref_index[0][4*mb_xy ] =
+ s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
+ s->current_picture.f.ref_index[0][4*mb_xy + 2] =
+ s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
}
/* no update if 8X8 because it has been done during parsing */
- s->current_picture.motion_val[0][xy][0] = motion_x;
- s->current_picture.motion_val[0][xy][1] = motion_y;
- s->current_picture.motion_val[0][xy + 1][0] = motion_x;
- s->current_picture.motion_val[0][xy + 1][1] = motion_y;
- s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
- s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
- s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
- s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
+ s->current_picture.f.motion_val[0][xy][0] = motion_x;
+ s->current_picture.f.motion_val[0][xy][1] = motion_y;
+ s->current_picture.f.motion_val[0][xy + 1][0] = motion_x;
+ s->current_picture.f.motion_val[0][xy + 1][1] = motion_y;
+ s->current_picture.f.motion_val[0][xy + wrap][0] = motion_x;
+ s->current_picture.f.motion_val[0][xy + wrap][1] = motion_y;
+ s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x;
+ s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y;
}
if(s->encoding){ //FIXME encoding MUST be cleaned up
if (s->mv_type == MV_TYPE_8X8)
- s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
else if(s->mb_intra)
- s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA;
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA;
else
- s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
}
}
@@ -154,7 +154,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
Diag Top
Left Center
*/
- if(!IS_SKIP(s->current_picture.mb_type[xy])){
+ if (!IS_SKIP(s->current_picture.f.mb_type[xy])) {
qp_c= s->qscale;
s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
@@ -164,10 +164,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
if(s->mb_y){
int qp_dt, qp_tt, qp_tc;
- if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride]))
+ if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
qp_tt=0;
else
- qp_tt= s->current_picture.qscale_table[xy-s->mb_stride];
+ qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];
if(qp_c)
qp_tc= qp_c;
@@ -187,10 +187,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
if(s->mb_x){
- if(qp_tt || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride]))
+ if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
qp_dt= qp_tt;
else
- qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride];
+ qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];
if(qp_dt){
const int chroma_qp= s->chroma_qscale_table[qp_dt];
@@ -209,10 +209,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
if(s->mb_x){
int qp_lc;
- if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1]))
+ if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
qp_lc= qp_c;
else
- qp_lc= s->current_picture.qscale_table[xy-1];
+ qp_lc = s->current_picture.f.qscale_table[xy - 1];
if(qp_lc){
s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
@@ -321,7 +321,7 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
static const int off[4]= {2, 1, 1, -1};
wrap = s->b8_stride;
- mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
+ mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block];
A = mot_val[ - 1];
/* special case for first (slice) line */