author    Andreas Rheinhardt <andreas.rheinhardt@outlook.com>  2023-10-04 03:34:28 +0200
committer Andreas Rheinhardt <andreas.rheinhardt@outlook.com>  2024-06-12 11:19:44 +0200
commit    ec1eba792aed90df5e151cb3c68e67d3d9730834 (patch)
tree      4f42b643f05da0617268eb7523ab4787ba056643 /libavcodec/rv34.c
parent    3a4e7694a13edc185a00393c2e6872ff3e17756b (diff)
download  ffmpeg-ec1eba792aed90df5e151cb3c68e67d3d9730834.tar.gz
avcodec/mpegvideo: Shorten variable names
current_picture -> cur_pic, last_picture -> last_pic; similarly for new_picture and next_picture. Also rename the corresponding *_ptr fields.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
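A minimal before/after sketch of the rename as it is applied throughout rv34.c (both lines are taken verbatim from the diff below; the surrounding function context is omitted):

    /* before: the long MpegEncContext field names */
    s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
    srcY = dir ? s->next_picture_ptr->f->data[0] : s->last_picture_ptr->f->data[0];

    /* after: the shortened names introduced by this commit */
    s->cur_pic_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
    srcY = dir ? s->next_pic_ptr->f->data[0] : s->last_pic_ptr->f->data[0];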
Diffstat (limited to 'libavcodec/rv34.c')
-rw-r--r--  libavcodec/rv34.c  156
1 file changed, 78 insertions, 78 deletions
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index cfd4cd259a..9425c0eb66 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -369,7 +369,7 @@ static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
r->is16 = get_bits1(gb);
if(r->is16){
- s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
+ s->cur_pic_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
r->block_type = RV34_MB_TYPE_INTRA16x16;
t = get_bits(gb, 2);
fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
@@ -379,7 +379,7 @@ static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
if(!get_bits1(gb))
av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
}
- s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
+ s->cur_pic_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
r->block_type = RV34_MB_TYPE_INTRA;
if(r->decode_intra_types(r, gb, intra_types) < 0)
return -1;
@@ -405,7 +405,7 @@ static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
r->block_type = r->decode_mb_info(r);
if(r->block_type == -1)
return -1;
- s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
+ s->cur_pic_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
r->mb_type[mb_pos] = r->block_type;
if(r->block_type == RV34_MB_SKIP){
if(s->pict_type == AV_PICTURE_TYPE_P)
@@ -413,7 +413,7 @@ static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
if(s->pict_type == AV_PICTURE_TYPE_B)
r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
}
- r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
+ r->is16 = !!IS_INTRA16x16(s->cur_pic_ptr->mb_type[mb_pos]);
if (rv34_decode_mv(r, r->block_type) < 0)
return -1;
if(r->block_type == RV34_MB_SKIP){
@@ -423,7 +423,7 @@ static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
r->chroma_vlc = 1;
r->luma_vlc = 0;
- if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
+ if(IS_INTRA(s->cur_pic_ptr->mb_type[mb_pos])){
if(r->is16){
t = get_bits(gb, 2);
fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
@@ -488,27 +488,27 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int
c_off = -1;
if(avail[-1]){
- A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
- A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
+ A[0] = s->cur_pic_ptr->motion_val[0][mv_pos-1][0];
+ A[1] = s->cur_pic_ptr->motion_val[0][mv_pos-1][1];
}
if(avail[-4]){
- B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
- B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
+ B[0] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride][0];
+ B[1] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride][1];
}else{
B[0] = A[0];
B[1] = A[1];
}
if(!avail[c_off-4]){
if(avail[-4] && (avail[-1] || r->rv30)){
- C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
- C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
+ C[0] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
+ C[1] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
}else{
C[0] = A[0];
C[1] = A[1];
}
}else{
- C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
- C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
+ C[0] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
+ C[1] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
}
mx = mid_pred(A[0], B[0], C[0]);
my = mid_pred(A[1], B[1], C[1]);
@@ -516,8 +516,8 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int
my += r->dmv[dmv_no][1];
for(j = 0; j < part_sizes_h[block_type]; j++){
for(i = 0; i < part_sizes_w[block_type]; i++){
- s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
- s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
+ s->cur_pic_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
+ s->cur_pic_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
}
}
}
@@ -566,7 +566,7 @@ static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
int has_A = 0, has_B = 0, has_C = 0;
int mx, my;
int i, j;
- Picture *cur_pic = s->current_picture_ptr;
+ Picture *cur_pic = s->cur_pic_ptr;
const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
int type = cur_pic->mb_type[mb_pos];
@@ -619,27 +619,27 @@ static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
int* avail = r->avail_cache + avail_indexes[0];
if(avail[-1]){
- A[0] = s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
- A[1] = s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
+ A[0] = s->cur_pic_ptr->motion_val[0][mv_pos - 1][0];
+ A[1] = s->cur_pic_ptr->motion_val[0][mv_pos - 1][1];
}
if(avail[-4]){
- B[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][0];
- B[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][1];
+ B[0] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride][0];
+ B[1] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride][1];
}else{
B[0] = A[0];
B[1] = A[1];
}
if(!avail[-4 + 2]){
if(avail[-4] && (avail[-1])){
- C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
- C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
+ C[0] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
+ C[1] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
}else{
C[0] = A[0];
C[1] = A[1];
}
}else{
- C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
- C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
+ C[0] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
+ C[1] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
}
mx = mid_pred(A[0], B[0], C[0]);
my = mid_pred(A[1], B[1], C[1]);
@@ -648,8 +648,8 @@ static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
for(j = 0; j < 2; j++){
for(i = 0; i < 2; i++){
for(k = 0; k < 2; k++){
- s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
- s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
+ s->cur_pic_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
+ s->cur_pic_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
}
}
}
@@ -688,24 +688,24 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
if(thirdpel){
int chroma_mx, chroma_my;
- mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
- my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
- lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
- ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
- chroma_mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
- chroma_my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
+ mx = (s->cur_pic_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
+ my = (s->cur_pic_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
+ lx = (s->cur_pic_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
+ ly = (s->cur_pic_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
+ chroma_mx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] / 2;
+ chroma_my = s->cur_pic_ptr->motion_val[dir][mv_pos][1] / 2;
umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
}else{
int cx, cy;
- mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
- my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
- lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
- ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
- cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
- cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
+ mx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] >> 2;
+ my = s->cur_pic_ptr->motion_val[dir][mv_pos][1] >> 2;
+ lx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] & 3;
+ ly = s->cur_pic_ptr->motion_val[dir][mv_pos][1] & 3;
+ cx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] / 2;
+ cy = s->cur_pic_ptr->motion_val[dir][mv_pos][1] / 2;
umx = cx >> 2;
umy = cy >> 2;
uvmx = (cx & 3) << 1;
@@ -718,14 +718,14 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
/* wait for the referenced mb row to be finished */
int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
- const ThreadFrame *f = dir ? &s->next_picture_ptr->tf : &s->last_picture_ptr->tf;
+ const ThreadFrame *f = dir ? &s->next_pic_ptr->tf : &s->last_pic_ptr->tf;
ff_thread_await_progress(f, mb_row, 0);
}
dxy = ly*4 + lx;
- srcY = dir ? s->next_picture_ptr->f->data[0] : s->last_picture_ptr->f->data[0];
- srcU = dir ? s->next_picture_ptr->f->data[1] : s->last_picture_ptr->f->data[1];
- srcV = dir ? s->next_picture_ptr->f->data[2] : s->last_picture_ptr->f->data[2];
+ srcY = dir ? s->next_pic_ptr->f->data[0] : s->last_pic_ptr->f->data[0];
+ srcU = dir ? s->next_pic_ptr->f->data[1] : s->last_pic_ptr->f->data[1];
+ srcV = dir ? s->next_pic_ptr->f->data[2] : s->last_pic_ptr->f->data[2];
src_x = s->mb_x * 16 + xoff + mx;
src_y = s->mb_y * 16 + yoff + my;
uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
@@ -886,11 +886,11 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type)
switch(block_type){
case RV34_MB_TYPE_INTRA:
case RV34_MB_TYPE_INTRA16x16:
- ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
return 0;
case RV34_MB_SKIP:
if(s->pict_type == AV_PICTURE_TYPE_P){
- ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
break;
}
@@ -898,23 +898,23 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type)
//surprisingly, it uses motion scheme from next reference frame
/* wait for the current mb row to be finished */
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
- ff_thread_await_progress(&s->next_picture_ptr->tf, FFMAX(0, s->mb_y-1), 0);
+ ff_thread_await_progress(&s->next_pic_ptr->tf, FFMAX(0, s->mb_y-1), 0);
- next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
+ next_bt = s->next_pic_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
- ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
- ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->cur_pic_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
}else
for(j = 0; j < 2; j++)
for(i = 0; i < 2; i++)
for(k = 0; k < 2; k++)
for(l = 0; l < 2; l++)
- s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
+ s->cur_pic_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_pic_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
rv34_mc_2mv(r, block_type);
else
rv34_mc_2mv_skip(r);
- ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
break;
case RV34_MB_P_16x16:
case RV34_MB_P_MIX16x16:
@@ -1182,7 +1182,7 @@ static int rv34_set_deblock_coef(RV34DecContext *r)
MpegEncContext *s = &r->s;
int hmvmask = 0, vmvmask = 0, i, j;
int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
- int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
+ int16_t (*motion_val)[2] = &s->cur_pic_ptr->motion_val[0][midx];
for(j = 0; j < 16; j += 8){
for(i = 0; i < 2; i++){
if(is_mv_diff_gt_3(motion_val + i, 1))
@@ -1225,26 +1225,26 @@ static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
if(s->mb_x && dist)
r->avail_cache[5] =
- r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
+ r->avail_cache[9] = s->cur_pic_ptr->mb_type[mb_pos - 1];
if(dist >= s->mb_width)
r->avail_cache[2] =
- r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
+ r->avail_cache[3] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride];
if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
- r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
+ r->avail_cache[4] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride + 1];
if(s->mb_x && dist > s->mb_width)
- r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
+ r->avail_cache[1] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride - 1];
s->qscale = r->si.quant;
cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
r->cbp_luma [mb_pos] = cbp;
r->cbp_chroma[mb_pos] = cbp >> 16;
r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
- s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
+ s->cur_pic_ptr->qscale_table[mb_pos] = s->qscale;
if(cbp == -1)
return -1;
- if (IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
+ if (IS_INTRA(s->cur_pic_ptr->mb_type[mb_pos])){
if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
else rv34_output_intra(r, intra_types, cbp);
return 0;
@@ -1327,21 +1327,21 @@ static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
if(s->mb_x && dist)
r->avail_cache[5] =
- r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
+ r->avail_cache[9] = s->cur_pic_ptr->mb_type[mb_pos - 1];
if(dist >= s->mb_width)
r->avail_cache[2] =
- r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
+ r->avail_cache[3] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride];
if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
- r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
+ r->avail_cache[4] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride + 1];
if(s->mb_x && dist > s->mb_width)
- r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
+ r->avail_cache[1] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride - 1];
s->qscale = r->si.quant;
cbp = rv34_decode_intra_mb_header(r, intra_types);
r->cbp_luma [mb_pos] = cbp;
r->cbp_chroma[mb_pos] = cbp >> 16;
r->deblock_coefs[mb_pos] = 0xFFFF;
- s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
+ s->cur_pic_ptr->qscale_table[mb_pos] = s->qscale;
if(cbp == -1)
return -1;
@@ -1482,7 +1482,7 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
r->loop_filter(r, s->mb_y - 2);
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
- ff_thread_report_progress(&s->current_picture_ptr->tf,
+ ff_thread_report_progress(&s->cur_pic_ptr->tf,
s->mb_y - 2, 0);
}
@@ -1580,19 +1580,19 @@ static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
s->mb_num_left = 0;
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
- ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
+ ff_thread_report_progress(&s->cur_pic_ptr->tf, INT_MAX, 0);
if (s->pict_type == AV_PICTURE_TYPE_B) {
- if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
+ if ((ret = av_frame_ref(pict, s->cur_pic_ptr->f)) < 0)
return ret;
- ff_print_debug_info(s, s->current_picture_ptr, pict);
- ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
+ ff_print_debug_info(s, s->cur_pic_ptr, pict);
+ ff_mpv_export_qp_table(s, pict, s->cur_pic_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
got_picture = 1;
- } else if (s->last_picture_ptr) {
- if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
+ } else if (s->last_pic_ptr) {
+ if ((ret = av_frame_ref(pict, s->last_pic_ptr->f)) < 0)
return ret;
- ff_print_debug_info(s, s->last_picture_ptr, pict);
- ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
+ ff_print_debug_info(s, s->last_pic_ptr, pict);
+ ff_mpv_export_qp_table(s, pict, s->last_pic_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
got_picture = 1;
}
@@ -1627,10 +1627,10 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
/* no supplementary picture */
if (buf_size == 0) {
/* special case for last picture */
- if (s->next_picture_ptr) {
- if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
+ if (s->next_pic_ptr) {
+ if ((ret = av_frame_ref(pict, s->next_pic_ptr->f)) < 0)
return ret;
- s->next_picture_ptr = NULL;
+ s->next_pic_ptr = NULL;
*got_picture_ptr = 1;
}
@@ -1653,7 +1653,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
return AVERROR_INVALIDDATA;
}
- if ((!s->last_picture_ptr || !s->last_picture_ptr->f->data[0]) &&
+ if ((!s->last_pic_ptr || !s->last_pic_ptr->f->data[0]) &&
si.type == AV_PICTURE_TYPE_B) {
av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
"reference data.\n");
@@ -1666,7 +1666,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
/* first slice */
if (si.start == 0) {
- if (s->mb_num_left > 0 && s->current_picture_ptr) {
+ if (s->mb_num_left > 0 && s->cur_pic_ptr) {
av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
s->mb_num_left);
if (!s->context_reinit)
@@ -1791,7 +1791,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
break;
}
- if (s->current_picture_ptr) {
+ if (s->cur_pic_ptr) {
if (last) {
if(r->loop_filter)
r->loop_filter(r, s->mb_height - 1);
@@ -1808,7 +1808,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
ff_er_frame_end(&s->er, NULL);
ff_mpv_frame_end(s);
s->mb_num_left = 0;
- ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
+ ff_thread_report_progress(&s->cur_pic_ptr->tf, INT_MAX, 0);
return AVERROR_INVALIDDATA;
}
}