author     Andreas Rheinhardt <andreas.rheinhardt@outlook.com>  2023-10-04 03:34:28 +0200
committer  Andreas Rheinhardt <andreas.rheinhardt@outlook.com>  2024-06-12 11:19:44 +0200
commit     ec1eba792aed90df5e151cb3c68e67d3d9730834 (patch)
tree       4f42b643f05da0617268eb7523ab4787ba056643 /libavcodec/mpeg4videodec.c
parent     3a4e7694a13edc185a00393c2e6872ff3e17756b (diff)
download   ffmpeg-ec1eba792aed90df5e151cb3c68e67d3d9730834.tar.gz
avcodec/mpegvideo: Shorten variable names
current_picture->cur_pic, last_picture->last_pic, similarly for
new_picture and next_picture. Also rename the corresponding *_ptr fields.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
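For readers unfamiliar with the mpegvideo context, the following is a minimal
sketch (not part of this commit) of what the rename means at the struct and
call-site level. The Picture and MpegEncContext definitions here are heavily
simplified stand-ins for the real ones in mpegvideo.h; only qscale_table's
int8_t* type is visible in the diff below, the other field types are assumed.

#include <stdint.h>

/* Simplified stand-in for the real Picture struct in mpegvideo.h. */
typedef struct Picture {
    int8_t   *qscale_table;   /* type visible in the first hunk below */
    uint32_t *mb_type;        /* assumed type */
    uint8_t  *mbskip_table;   /* assumed type */
} Picture;

/* Only the renamed fields are shown; the real MpegEncContext has many more. */
typedef struct MpegEncContext {
    Picture  cur_pic;        /* was: current_picture     */
    Picture  last_pic;       /* was: last_picture        */
    Picture  next_pic;       /* was: next_picture        */
    Picture *cur_pic_ptr;    /* was: current_picture_ptr */
    Picture *last_pic_ptr;   /* was: last_picture_ptr    */
    Picture *next_pic_ptr;   /* was: next_picture_ptr    */
} MpegEncContext;

/* A typical decoder call site after the rename: the access pattern is
 * unchanged, only the field name is shorter. */
static inline uint32_t mb_type_at(const MpegEncContext *s, int xy)
{
    return s->cur_pic.mb_type[xy];
}

Every hunk in the diff below is mechanical in this sense: the indexing by the
macroblock address xy or the block index stays the same, and only the field
name to the left of the '.' or '->' changes.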
Diffstat (limited to 'libavcodec/mpeg4videodec.c')
-rw-r--r--  libavcodec/mpeg4videodec.c  |  84
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c
index 153a9371e0..0cc0bd3f23 100644
--- a/libavcodec/mpeg4videodec.c
+++ b/libavcodec/mpeg4videodec.c
@@ -316,7 +316,7 @@ void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n, int dir)
{
int i;
int16_t *ac_val, *ac_val1;
- int8_t *const qscale_table = s->current_picture.qscale_table;
+ int8_t *const qscale_table = s->cur_pic.qscale_table;
/* find prediction */
ac_val = &s->ac_val[0][0][0] + s->block_index[n] * 16;
@@ -970,13 +970,13 @@ static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
} while (cbpc == 8);
s->cbp_table[xy] = cbpc & 3;
- s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
+ s->cur_pic.mb_type[xy] = MB_TYPE_INTRA;
s->mb_intra = 1;
if (cbpc & 4)
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
- s->current_picture.qscale_table[xy] = s->qscale;
+ s->cur_pic.qscale_table[xy] = s->qscale;
s->mbintra_table[xy] = 1;
for (i = 0; i < 6; i++) {
@@ -994,7 +994,7 @@ static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
s->pred_dir_table[xy] = dir;
} else { /* P/S_TYPE */
int mx, my, pred_x, pred_y, bits;
- int16_t *const mot_val = s->current_picture.motion_val[0][s->block_index[0]];
+ int16_t *const mot_val = s->cur_pic.motion_val[0][s->block_index[0]];
const int stride = s->b8_stride * 2;
try_again:
@@ -1007,14 +1007,14 @@ try_again:
/* skip mb */
if (s->pict_type == AV_PICTURE_TYPE_S &&
ctx->vol_sprite_usage == GMC_SPRITE) {
- s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
+ s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_16x16 |
MB_TYPE_GMC |
MB_TYPE_L0;
mx = get_amv(ctx, 0);
my = get_amv(ctx, 1);
} else {
- s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
+ s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_16x16 |
MB_TYPE_L0;
mx = my = 0;
@@ -1047,7 +1047,7 @@ try_again:
s->mb_intra = ((cbpc & 4) != 0);
if (s->mb_intra) {
- s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
+ s->cur_pic.mb_type[xy] = MB_TYPE_INTRA;
s->mbintra_table[xy] = 1;
mot_val[0] =
mot_val[2] =
@@ -1080,12 +1080,12 @@ try_again:
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return AVERROR_INVALIDDATA;
- s->current_picture.mb_type[xy] = MB_TYPE_16x16 |
+ s->cur_pic.mb_type[xy] = MB_TYPE_16x16 |
MB_TYPE_L0;
} else {
mx = get_amv(ctx, 0);
my = get_amv(ctx, 1);
- s->current_picture.mb_type[xy] = MB_TYPE_16x16 |
+ s->cur_pic.mb_type[xy] = MB_TYPE_16x16 |
MB_TYPE_GMC |
MB_TYPE_L0;
}
@@ -1100,7 +1100,7 @@ try_again:
mot_val[3 + stride] = my;
} else {
int i;
- s->current_picture.mb_type[xy] = MB_TYPE_8x8 |
+ s->cur_pic.mb_type[xy] = MB_TYPE_8x8 |
MB_TYPE_L0;
for (i = 0; i < 4; i++) {
int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
@@ -1156,9 +1156,9 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
}
s->cbp_table[xy] |= cbpy << 2;
- s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
+ s->cur_pic.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
} else { /* P || S_TYPE */
- if (IS_INTRA(s->current_picture.mb_type[xy])) {
+ if (IS_INTRA(s->cur_pic.mb_type[xy])) {
int i;
int dir = 0;
int ac_pred = get_bits1(&s->gb);
@@ -1172,7 +1172,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
if (s->cbp_table[xy] & 8)
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
- s->current_picture.qscale_table[xy] = s->qscale;
+ s->cur_pic.qscale_table[xy] = s->qscale;
for (i = 0; i < 6; i++) {
int dc_pred_dir;
@@ -1188,10 +1188,10 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
}
s->cbp_table[xy] &= 3; // remove dquant
s->cbp_table[xy] |= cbpy << 2;
- s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
+ s->cur_pic.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
s->pred_dir_table[xy] = dir;
- } else if (IS_SKIP(s->current_picture.mb_type[xy])) {
- s->current_picture.qscale_table[xy] = s->qscale;
+ } else if (IS_SKIP(s->cur_pic.mb_type[xy])) {
+ s->cur_pic.qscale_table[xy] = s->qscale;
s->cbp_table[xy] = 0;
} else {
int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc, CBPY_VLC_BITS, 1);
@@ -1204,7 +1204,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
if (s->cbp_table[xy] & 8)
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
- s->current_picture.qscale_table[xy] = s->qscale;
+ s->cur_pic.qscale_table[xy] = s->qscale;
s->cbp_table[xy] &= 3; // remove dquant
s->cbp_table[xy] |= (cbpy ^ 0xf) << 2;
@@ -1567,20 +1567,20 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64])
av_assert2(s == (void*)ctx);
- mb_type = s->current_picture.mb_type[xy];
+ mb_type = s->cur_pic.mb_type[xy];
cbp = s->cbp_table[xy];
use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold;
- if (s->current_picture.qscale_table[xy] != s->qscale)
- ff_set_qscale(s, s->current_picture.qscale_table[xy]);
+ if (s->cur_pic.qscale_table[xy] != s->qscale)
+ ff_set_qscale(s, s->cur_pic.qscale_table[xy]);
if (s->pict_type == AV_PICTURE_TYPE_P ||
s->pict_type == AV_PICTURE_TYPE_S) {
int i;
for (i = 0; i < 4; i++) {
- s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
- s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
+ s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
+ s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
}
s->mb_intra = IS_INTRA(mb_type);
@@ -1594,14 +1594,14 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64])
&& ctx->vol_sprite_usage == GMC_SPRITE) {
s->mcsel = 1;
s->mb_skipped = 0;
- s->current_picture.mbskip_table[xy] = 0;
+ s->cur_pic.mbskip_table[xy] = 0;
} else {
s->mcsel = 0;
s->mb_skipped = 1;
- s->current_picture.mbskip_table[xy] = 1;
+ s->cur_pic.mbskip_table[xy] = 1;
}
} else if (s->mb_intra) {
- s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
+ s->ac_pred = IS_ACPRED(s->cur_pic.mb_type[xy]);
} else if (!s->mb_intra) {
// s->mcsel = 0; // FIXME do we need to init that?
@@ -1614,7 +1614,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64])
}
} else { /* I-Frame */
s->mb_intra = 1;
- s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
+ s->ac_pred = IS_ACPRED(s->cur_pic.mb_type[xy]);
}
if (!IS_SKIP(mb_type)) {
@@ -1673,23 +1673,23 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv_type = MV_TYPE_16X16;
if (s->pict_type == AV_PICTURE_TYPE_S &&
ctx->vol_sprite_usage == GMC_SPRITE) {
- s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
+ s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_GMC |
MB_TYPE_16x16 |
MB_TYPE_L0;
s->mcsel = 1;
s->mv[0][0][0] = get_amv(ctx, 0);
s->mv[0][0][1] = get_amv(ctx, 1);
- s->current_picture.mbskip_table[xy] = 0;
+ s->cur_pic.mbskip_table[xy] = 0;
s->mb_skipped = 0;
} else {
- s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
+ s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_16x16 |
MB_TYPE_L0;
s->mcsel = 0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
- s->current_picture.mbskip_table[xy] = 1;
+ s->cur_pic.mbskip_table[xy] = 1;
s->mb_skipped = 1;
}
goto end;
@@ -1730,7 +1730,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) {
if (s->mcsel) {
- s->current_picture.mb_type[xy] = MB_TYPE_GMC |
+ s->cur_pic.mb_type[xy] = MB_TYPE_GMC |
MB_TYPE_16x16 |
MB_TYPE_L0;
/* 16x16 global motion prediction */
@@ -1740,7 +1740,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv[0][0][0] = mx;
s->mv[0][0][1] = my;
} else if ((!s->progressive_sequence) && get_bits1(&s->gb)) {
- s->current_picture.mb_type[xy] = MB_TYPE_16x8 |
+ s->cur_pic.mb_type[xy] = MB_TYPE_16x8 |
MB_TYPE_L0 |
MB_TYPE_INTERLACED;
/* 16x8 field motion prediction */
@@ -1764,7 +1764,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv[0][i][1] = my;
}
} else {
- s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+ s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
@@ -1781,7 +1781,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv[0][0][1] = my;
}
} else {
- s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
+ s->cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for (i = 0; i < 4; i++) {
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
@@ -1814,11 +1814,11 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->last_mv[i][1][1] = 0;
}
- ff_thread_await_progress(&s->next_picture_ptr->tf, s->mb_y, 0);
+ ff_thread_await_progress(&s->next_pic_ptr->tf, s->mb_y, 0);
}
/* if we skipped it in the future P-frame than skip it now too */
- s->mb_skipped = s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
+ s->mb_skipped = s->next_pic.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
if (s->mb_skipped) {
/* skip mb */
@@ -1831,7 +1831,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv[0][0][1] =
s->mv[1][0][0] =
s->mv[1][0][1] = 0;
- s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
+ s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_16x16 |
MB_TYPE_L0;
goto end;
@@ -1951,7 +1951,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
mb_type |= ff_mpeg4_set_direct_mv(s, mx, my);
}
- s->current_picture.mb_type[xy] = mb_type;
+ s->cur_pic.mb_type[xy] = mb_type;
} else { /* I-Frame */
int use_intra_dc_vlc;
@@ -1970,9 +1970,9 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
intra:
s->ac_pred = get_bits1(&s->gb);
if (s->ac_pred)
- s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
+ s->cur_pic.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
else
- s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
+ s->cur_pic.mb_type[xy] = MB_TYPE_INTRA;
cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc, CBPY_VLC_BITS, 1);
if (cbpy < 0) {
@@ -2019,11 +2019,11 @@ end:
if (s->pict_type == AV_PICTURE_TYPE_B) {
const int delta = s->mb_x + 1 == s->mb_width ? 2 : 1;
- ff_thread_await_progress(&s->next_picture_ptr->tf,
+ ff_thread_await_progress(&s->next_pic_ptr->tf,
(s->mb_x + delta >= s->mb_width)
? FFMIN(s->mb_y + 1, s->mb_height - 1)
: s->mb_y, 0);
- if (s->next_picture.mbskip_table[xy + delta])
+ if (s->next_pic.mbskip_table[xy + delta])
return SLICE_OK;
}