| author    | Vittorio Giovara <vittorio.giovara@gmail.com>          | 2014-02-28 11:31:56 +0100 |
|-----------|--------------------------------------------------------|---------------------------|
| committer | Vittorio Giovara <vittorio.giovara@gmail.com>          | 2014-03-16 23:01:00 +0100 |
| commit    | d66e305bd1b4f3e91ae4e7e549148509d0811672               |                           |
| tree      | 0c2d149ae94922d03c098d08e938d3f64ee01bb9 /libavcodec   |                           |
| parent    | 1c79b1625d4d257bfd01eccb84cc0ab355fb9a9e               |                           |
| download  | ffmpeg-d66e305bd1b4f3e91ae4e7e549148509d0811672.tar.gz |                           |
er: move relevant fields from Picture to ERPicture
This is done to disentangle ER from mpegvideo. Callers that have a classic
Picture can use ff_mpeg_set_erpic() to fill these fields; otherwise they can
set them with a custom function. Please note that the buffers need to be
allocated before calling ff_er_frame_end().
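The patch itself does not show what such a custom setup would look like; the following is a minimal sketch of one, assuming a caller that owns its own buffers. The helper name and its parameters are illustrative only — just the ERPicture/ERContext field names and ff_er_frame_end() come from the patch below.

```c
#include "error_resilience.h"

/* Hypothetical helper -- not part of this patch -- showing how a decoder that
 * does not use mpegvideo Pictures could fill the new ERPicture fields by hand
 * instead of calling ff_mpeg_set_erpic().  Every buffer remains owned by the
 * caller: after this change ff_er_frame_end() bails out with an error message
 * instead of allocating missing motion_val/ref_index arrays. */
static void fill_er_cur_pic(ERContext *er, AVFrame *frame, ThreadFrame *tf,
                            int16_t (*motion_val[2])[2], int8_t *ref_index[2],
                            uint32_t *mb_type, int field_picture)
{
    int i;

    er->cur_pic.f  = frame; /* frame the concealment code reads and writes   */
    er->cur_pic.tf = tf;    /* ThreadFrame wrapping the same frame, if any   */

    for (i = 0; i < 2; i++) {
        er->cur_pic.motion_val[i] = motion_val[i]; /* must be pre-allocated */
        er->cur_pic.ref_index[i]  = ref_index[i];  /* must be pre-allocated */
    }

    er->cur_pic.mb_type       = mb_type;
    er->cur_pic.field_picture = field_picture;
}
```

With cur_pic (and, when reference frames exist, last_pic/next_pic) filled this way, the caller invokes ff_er_frame_end(er) as before; per the new checks in ff_er_frame_end(), leaving last_pic/next_pic zeroed simply disables temporal concealment.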
Diffstat (limited to 'libavcodec')

 -rw-r--r--  libavcodec/error_resilience.c | 215
 -rw-r--r--  libavcodec/error_resilience.h |  19
 -rw-r--r--  libavcodec/h264.c             |   8
 -rw-r--r--  libavcodec/mpegvideo.c        |  25
 -rw-r--r--  libavcodec/mpegvideo.h        |   2

5 files changed, 145 insertions, 124 deletions
diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
index 12171352f4..17346e15d4 100644
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@ -57,7 +57,7 @@ static void set_mv_strides(ERContext *s, int *mv_step, int *stride)
 static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                    uint8_t *dest_cr, int mb_x, int mb_y)
 {
-    int *linesize = s->cur_pic->f.linesize;
+    int *linesize = s->cur_pic.f->linesize;
     int dc, dcu, dcv, y, i;
     for (i = 0; i < 4; i++) {
         dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
@@ -145,7 +145,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
             mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
             error = s->error_status_table[mb_index];

-            if (IS_INTER(s->cur_pic->mb_type[mb_index]))
+            if (IS_INTER(s->cur_pic.mb_type[mb_index]))
                 continue; // inter
             if (!(error & ER_DC_ERROR))
                 continue; // dc-ok
@@ -154,7 +154,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
             for (j = b_x + 1; j < w; j++) {
                 int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
                 int error_j = s->error_status_table[mb_index_j];
-                int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
                 if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                     color[0] = dc[j + b_y * stride];
                     distance[0] = j - b_x;
@@ -166,7 +166,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
             for (j = b_x - 1; j >= 0; j--) {
                 int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
                 int error_j = s->error_status_table[mb_index_j];
-                int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
                 if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                     color[1] = dc[j + b_y * stride];
                     distance[1] = b_x - j;
@@ -178,7 +178,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
             for (j = b_y + 1; j < h; j++) {
                 int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
                 int error_j = s->error_status_table[mb_index_j];
-                int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
                 if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                     color[2] = dc[b_x + j * stride];
                     distance[2] = j - b_y;
@@ -191,7 +191,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
             for (j = b_y - 1; j >= 0; j--) {
                 int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
                 int error_j = s->error_status_table[mb_index_j];
-                int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
                 if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                     color[3] = dc[b_x + j * stride];
                     distance[3] = b_y - j;
@@ -231,13 +231,13 @@ static void h_block_filter(ERContext *s, uint8_t *dst, int w,
             int y;
             int left_status = s->error_status_table[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
             int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
-            int left_intra = IS_INTRA(s->cur_pic->mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
-            int right_intra = IS_INTRA(s->cur_pic->mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+            int left_intra = IS_INTRA(s->cur_pic.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+            int right_intra = IS_INTRA(s->cur_pic.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
             int left_damage = left_status & ER_MB_ERROR;
             int right_damage = right_status & ER_MB_ERROR;
             int offset = b_x * 8 + b_y * stride * 8;
-            int16_t *left_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
-            int16_t *right_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
+            int16_t *left_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
+            int16_t *right_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
             if (!(left_damage || right_damage))
                 continue; // both undamaged
             if ((!left_intra) && (!right_intra) &&
@@ -299,14 +299,14 @@ static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
             int x;
             int top_status = s->error_status_table[(b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
             int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
-            int top_intra = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
-            int bottom_intra = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
+            int top_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
+            int bottom_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
             int top_damage = top_status & ER_MB_ERROR;
             int bottom_damage = bottom_status & ER_MB_ERROR;
             int offset = b_x * 8 + b_y * stride * 8;

-            int16_t *top_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
-            int16_t *bottom_mv = s->cur_pic->motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
+            int16_t *top_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
+            int16_t *bottom_mv = s->cur_pic.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];

             if (!(top_damage || bottom_damage))
                 continue; // both undamaged
@@ -371,7 +371,7 @@ static void guess_mv(ERContext *s)
             int f = 0;
             int error = s->error_status_table[mb_xy];

-            if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
+            if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
                 f = MV_FROZEN; // intra // FIXME check
             if (!(error & ER_MV_ERROR))
                 f = MV_FROZEN; // inter with undamaged MV
@@ -386,9 +386,9 @@ static void guess_mv(ERContext *s)
     for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
         for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
             const int mb_xy = mb_x + mb_y * s->mb_stride;
-            int mv_dir = (s->last_pic && s->last_pic->f.data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
+            int mv_dir = (s->last_pic.f && s->last_pic.f->data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;

-            if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
+            if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
                 continue;
             if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
                 continue;
@@ -429,8 +429,8 @@ static void guess_mv(ERContext *s)
                 if (fixed[mb_xy] == MV_FROZEN)
                     continue;

-                assert(!IS_INTRA(s->cur_pic->mb_type[mb_xy]));
-                assert(s->last_pic && s->last_pic->f.data[0]);
+                assert(!IS_INTRA(s->cur_pic.mb_type[mb_xy]));
+                assert(s->last_pic.f && s->last_pic.f->data[0]);

                 j = 0;
                 if (mb_x > 0 && fixed[mb_xy - 1] == MV_FROZEN)
@@ -460,38 +460,38 @@ static void guess_mv(ERContext *s)
                 if (mb_x > 0 && fixed[mb_xy - 1]) {
                     mv_predictor[pred_count][0] =
-                        s->cur_pic->motion_val[0][mot_index - mot_step][0];
+                        s->cur_pic.motion_val[0][mot_index - mot_step][0];
                     mv_predictor[pred_count][1] =
-                        s->cur_pic->motion_val[0][mot_index - mot_step][1];
+                        s->cur_pic.motion_val[0][mot_index - mot_step][1];
                     ref[pred_count] =
-                        s->cur_pic->ref_index[0][4 * (mb_xy - 1)];
+                        s->cur_pic.ref_index[0][4 * (mb_xy - 1)];
                     pred_count++;
                 }
                 if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                     mv_predictor[pred_count][0] =
-                        s->cur_pic->motion_val[0][mot_index + mot_step][0];
+                        s->cur_pic.motion_val[0][mot_index + mot_step][0];
                     mv_predictor[pred_count][1] =
-                        s->cur_pic->motion_val[0][mot_index + mot_step][1];
+                        s->cur_pic.motion_val[0][mot_index + mot_step][1];
                     ref[pred_count] =
-                        s->cur_pic->ref_index[0][4 * (mb_xy + 1)];
+                        s->cur_pic.ref_index[0][4 * (mb_xy + 1)];
                     pred_count++;
                 }
                 if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                     mv_predictor[pred_count][0] =
-                        s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][0];
+                        s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][0];
                     mv_predictor[pred_count][1] =
-                        s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][1];
+                        s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][1];
                     ref[pred_count] =
-                        s->cur_pic->ref_index[0][4 * (mb_xy - s->mb_stride)];
+                        s->cur_pic.ref_index[0][4 * (mb_xy - s->mb_stride)];
                     pred_count++;
                 }
                 if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride]) {
                     mv_predictor[pred_count][0] =
-                        s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][0];
+                        s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][0];
                     mv_predictor[pred_count][1] =
-                        s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][1];
+                        s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][1];
                     ref[pred_count] =
-                        s->cur_pic->ref_index[0][4 * (mb_xy + s->mb_stride)];
+                        s->cur_pic.ref_index[0][4 * (mb_xy + s->mb_stride)];
                     pred_count++;
                 }
                 if (pred_count == 0)
@@ -549,19 +549,19 @@ skip_mean_and_median:
                     if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                         // FIXME
                     } else {
-                        ff_thread_await_progress(&s->last_pic->tf,
+                        ff_thread_await_progress(s->last_pic.tf,
                                                  mb_y, 0);
                     }
-                    if (!s->last_pic->motion_val[0] ||
-                        !s->last_pic->ref_index[0])
+                    if (!s->last_pic.motion_val[0] ||
+                        !s->last_pic.ref_index[0])
                         goto skip_last_mv;
-                    prev_x = s->last_pic->motion_val[0][mot_index][0];
-                    prev_y = s->last_pic->motion_val[0][mot_index][1];
-                    prev_ref = s->last_pic->ref_index[0][4 * mb_xy];
+                    prev_x = s->last_pic.motion_val[0][mot_index][0];
+                    prev_y = s->last_pic.motion_val[0][mot_index][1];
+                    prev_ref = s->last_pic.ref_index[0][4 * mb_xy];
                 } else {
-                    prev_x = s->cur_pic->motion_val[0][mot_index][0];
-                    prev_y = s->cur_pic->motion_val[0][mot_index][1];
-                    prev_ref = s->cur_pic->ref_index[0][4 * mb_xy];
+                    prev_x = s->cur_pic.motion_val[0][mot_index][0];
+                    prev_y = s->cur_pic.motion_val[0][mot_index][1];
+                    prev_ref = s->cur_pic.ref_index[0][4 * mb_xy];
                 }

                 /* last MV */
@@ -573,14 +573,14 @@ skip_mean_and_median:
 skip_last_mv:
                 for (j = 0; j < pred_count; j++) {
-                    int *linesize = s->cur_pic->f.linesize;
+                    int *linesize = s->cur_pic.f->linesize;
                     int score = 0;
-                    uint8_t *src = s->cur_pic->f.data[0] +
+                    uint8_t *src = s->cur_pic.f->data[0] +
                                    mb_x * 16 + mb_y * 16 * linesize[0];

-                    s->cur_pic->motion_val[0][mot_index][0] =
+                    s->cur_pic.motion_val[0][mot_index][0] =
                         s->mv[0][0][0] = mv_predictor[j][0];
-                    s->cur_pic->motion_val[0][mot_index][1] =
+                    s->cur_pic.motion_val[0][mot_index][1] =
                         s->mv[0][0][1] = mv_predictor[j][1];

                     // predictor intra or otherwise not available
@@ -625,8 +625,8 @@ skip_last_mv:
                 for (i = 0; i < mot_step; i++)
                     for (j = 0; j < mot_step; j++) {
-                        s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
-                        s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
+                        s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
+                        s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                     }

                 s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
@@ -657,7 +657,7 @@ static int is_intra_more_likely(ERContext *s)
 {
     int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;

-    if (!s->last_pic || !s->last_pic->f.data[0])
+    if (!s->last_pic.f || !s->last_pic.f->data[0])
         return 1; // no previous frame available -> use spatial prediction

     undamaged_count = 0;
@@ -679,7 +679,7 @@ FF_DISABLE_DEPRECATION_WARNINGS
     // prevent dsp.sad() check, that requires access to the image
     if (CONFIG_MPEG_XVMC_DECODER &&
         s->avctx->xvmc_acceleration &&
-        s->cur_pic->f.pict_type == AV_PICTURE_TYPE_I)
+        s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I)
         return 1;
 FF_ENABLE_DEPRECATION_WARNINGS
 #endif /* FF_API_XVMC */
@@ -702,17 +702,17 @@ FF_ENABLE_DEPRECATION_WARNINGS
             if ((j % skip_amount) != 0)
                 continue;

-            if (s->cur_pic->f.pict_type == AV_PICTURE_TYPE_I) {
-                int *linesize = s->cur_pic->f.linesize;
-                uint8_t *mb_ptr = s->cur_pic->f.data[0] +
+            if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) {
+                int *linesize = s->cur_pic.f->linesize;
+                uint8_t *mb_ptr = s->cur_pic.f->data[0] +
                                   mb_x * 16 + mb_y * 16 * linesize[0];
-                uint8_t *last_mb_ptr = s->last_pic->f.data[0] +
+                uint8_t *last_mb_ptr = s->last_pic.f->data[0] +
                                        mb_x * 16 + mb_y * 16 * linesize[0];

                 if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                     // FIXME
                 } else {
-                    ff_thread_await_progress(&s->last_pic->tf, mb_y, 0);
+                    ff_thread_await_progress(s->last_pic.tf, mb_y, 0);
                 }
                 is_intra_likely += s->dsp->sad[0](NULL, last_mb_ptr, mb_ptr,
                                                   linesize[0], 16);
@@ -720,7 +720,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
                                                   last_mb_ptr + linesize[0] * 16,
                                                   linesize[0], 16);
             } else {
-                if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
+                if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
                     is_intra_likely++;
                 else
                     is_intra_likely--;
@@ -818,7 +818,7 @@ void ff_er_add_slice(ERContext *s, int startx, int starty,

 void ff_er_frame_end(ERContext *s)
 {
-    int *linesize = s->cur_pic->f.linesize;
+    int *linesize = s->cur_pic.f->linesize;
     int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
     int distance;
     int threshold_part[4] = { 100, 100, 100 };
@@ -830,32 +830,16 @@ void ff_er_frame_end(ERContext *s)
      * though it should not crash if enabled. */
    if (!s->avctx->error_concealment || s->error_count == 0 ||
        s->avctx->hwaccel ||
-        !s->cur_pic || s->cur_pic->field_picture ||
+        !s->cur_pic.f ||
+        s->cur_pic.field_picture ||
        s->error_count == 3 * s->mb_width *
                          (s->avctx->skip_top + s->avctx->skip_bottom)) {
        return;
    };

-    if (s->cur_pic->motion_val[0] == NULL) {
-        av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
-
-        for (i = 0; i < 2; i++) {
-            s->cur_pic->ref_index_buf[i] = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
-            s->cur_pic->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
-            if (!s->cur_pic->ref_index_buf[i] || !s->cur_pic->motion_val_buf[i])
-                break;
-            s->cur_pic->ref_index[i] = s->cur_pic->ref_index_buf[i]->data;
-            s->cur_pic->motion_val[i] = (int16_t (*)[2])s->cur_pic->motion_val_buf[i]->data + 4;
-        }
-        if (i < 2) {
-            for (i = 0; i < 2; i++) {
-                av_buffer_unref(&s->cur_pic->ref_index_buf[i]);
-                av_buffer_unref(&s->cur_pic->motion_val_buf[i]);
-                s->cur_pic->ref_index[i] = NULL;
-                s->cur_pic->motion_val[i] = NULL;
-            }
-            return;
-        }
+    if (!s->cur_pic.motion_val[0] || !s->cur_pic.ref_index[0]) {
+        av_log(s->avctx, AV_LOG_ERROR, "MVs not available, ER not possible.\n");
+        return;
    }

    if (s->avctx->debug & FF_DEBUG_ER) {
@@ -1013,26 +997,26 @@ void ff_er_frame_end(ERContext *s)
            continue;

        if (is_intra_likely)
-            s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
+            s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        else
-            s->cur_pic->mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+            s->cur_pic.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
    }

    // change inter to intra blocks if no reference frames are available
-    if (!(s->last_pic && s->last_pic->f.data[0]) &&
-        !(s->next_pic && s->next_pic->f.data[0]))
+    if (!(s->last_pic.f && s->last_pic.f->data[0]) &&
+        !(s->next_pic.f && s->next_pic.f->data[0]))
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
-            if (!IS_INTRA(s->cur_pic->mb_type[mb_xy]))
-                s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
+            if (!IS_INTRA(s->cur_pic.mb_type[mb_xy]))
+                s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        }

    /* handle inter blocks with damaged AC */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy = mb_x + mb_y * s->mb_stride;
-            const int mb_type = s->cur_pic->mb_type[mb_xy];
-            const int dir = !(s->last_pic && s->last_pic->f.data[0]);
+            const int mb_type = s->cur_pic.mb_type[mb_xy];
+            const int dir = !(s->last_pic.f && s->last_pic.f->data[0]);
            const int mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
            int mv_type;
@@ -1050,13 +1034,13 @@ void ff_er_frame_end(ERContext *s)
                int j;
                mv_type = MV_TYPE_8X8;
                for (j = 0; j < 4; j++) {
-                    s->mv[0][j][0] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
-                    s->mv[0][j][1] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
+                    s->mv[0][j][0] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
+                    s->mv[0][j][1] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
                }
            } else {
                mv_type = MV_TYPE_16X16;
-                s->mv[0][0][0] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
-                s->mv[0][0][1] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
+                s->mv[0][0][0] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
+                s->mv[0][0][1] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
            }

            s->decode_mb(s->opaque, 0 /* FIXME h264 partitioned slices need this set */,
@@ -1065,12 +1049,12 @@ void ff_er_frame_end(ERContext *s)
    }

    /* guess MVs */
-    if (s->cur_pic->f.pict_type == AV_PICTURE_TYPE_B) {
+    if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_B) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
                const int mb_xy = mb_x + mb_y * s->mb_stride;
-                const int mb_type = s->cur_pic->mb_type[mb_xy];
+                const int mb_type = s->cur_pic.mb_type[mb_xy];
                int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;

                error = s->error_status_table[mb_xy];
@@ -1082,21 +1066,21 @@ void ff_er_frame_end(ERContext *s)
                if (!(error & ER_AC_ERROR))
                    continue; // undamaged inter

-                if (!(s->last_pic && s->last_pic->f.data[0]))
+                if (!(s->last_pic.f && s->last_pic.f->data[0]))
                    mv_dir &= ~MV_DIR_FORWARD;
-                if (!(s->next_pic && s->next_pic->f.data[0]))
+                if (!(s->next_pic.f && s->next_pic.f->data[0]))
                    mv_dir &= ~MV_DIR_BACKWARD;

                if (s->pp_time) {
                    int time_pp = s->pp_time;
                    int time_pb = s->pb_time;

-                    ff_thread_await_progress(&s->next_pic->tf, mb_y, 0);
+                    ff_thread_await_progress(s->next_pic.tf, mb_y, 0);

-                    s->mv[0][0][0] = s->next_pic->motion_val[0][xy][0] * time_pb / time_pp;
-                    s->mv[0][0][1] = s->next_pic->motion_val[0][xy][1] * time_pb / time_pp;
-                    s->mv[1][0][0] = s->next_pic->motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
-                    s->mv[1][0][1] = s->next_pic->motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
+                    s->mv[0][0][0] = s->next_pic.motion_val[0][xy][0] * time_pb / time_pp;
+                    s->mv[0][0][1] = s->next_pic.motion_val[0][xy][1] * time_pb / time_pp;
+                    s->mv[1][0][0] = s->next_pic.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
+                    s->mv[1][0][1] = s->next_pic.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
                } else {
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
@@ -1125,7 +1109,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
            int16_t *dc_ptr;
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy = mb_x + mb_y * s->mb_stride;
-            const int mb_type = s->cur_pic->mb_type[mb_xy];
+            const int mb_type = s->cur_pic.mb_type[mb_xy];

            error = s->error_status_table[mb_xy];
@@ -1134,9 +1118,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
 //          if (error & ER_MV_ERROR)
 //              continue; // inter data damaged FIXME is this good?

-            dest_y = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
-            dest_cb = s->cur_pic->f.data[1] + mb_x * 8 + mb_y * 8 * linesize[1];
-            dest_cr = s->cur_pic->f.data[2] + mb_x * 8 + mb_y * 8 * linesize[2];
+            dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
+            dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1];
+            dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2];

            dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
            for (n = 0; n < 4; n++) {
@@ -1176,7 +1160,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy = mb_x + mb_y * s->mb_stride;
-            const int mb_type = s->cur_pic->mb_type[mb_xy];
+            const int mb_type = s->cur_pic.mb_type[mb_xy];

            error = s->error_status_table[mb_xy];
@@ -1185,9 +1169,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
            if (!(error & ER_AC_ERROR))
                continue; // undamaged

-            dest_y = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
-            dest_cb = s->cur_pic->f.data[1] + mb_x * 8 + mb_y * 8 * linesize[1];
-            dest_cr = s->cur_pic->f.data[2] + mb_x * 8 + mb_y * 8 * linesize[2];
+            dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
+            dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1];
+            dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2];

            put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
        }
@@ -1195,19 +1179,19 @@ FF_ENABLE_DEPRECATION_WARNINGS
    if (s->avctx->error_concealment & FF_EC_DEBLOCK) {
        /* filter horizontal block boundaries */
-        h_block_filter(s, s->cur_pic->f.data[0], s->mb_width * 2,
+        h_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
                       s->mb_height * 2, linesize[0], 1);
-        h_block_filter(s, s->cur_pic->f.data[1], s->mb_width,
+        h_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
                       s->mb_height, linesize[1], 0);
-        h_block_filter(s, s->cur_pic->f.data[2], s->mb_width,
+        h_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
                       s->mb_height, linesize[2], 0);

        /* filter vertical block boundaries */
-        v_block_filter(s, s->cur_pic->f.data[0], s->mb_width * 2,
+        v_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
                       s->mb_height * 2, linesize[0], 1);
-        v_block_filter(s, s->cur_pic->f.data[1], s->mb_width,
+        v_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
                       s->mb_height, linesize[1], 0);
-        v_block_filter(s, s->cur_pic->f.data[2], s->mb_width,
+        v_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
                       s->mb_height, linesize[2], 0);
    }
@@ -1217,13 +1201,14 @@ ec_clean:
        const int mb_xy = s->mb_index2xy[i];
        int error = s->error_status_table[mb_xy];

-        if (s->cur_pic->f.pict_type != AV_PICTURE_TYPE_B &&
+        if (s->cur_pic.f->pict_type != AV_PICTURE_TYPE_B &&
            (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) {
            s->mbskip_table[mb_xy] = 0;
        }
        s->mbintra_table[mb_xy] = 1;
    }
-    s->cur_pic = NULL;
-    s->next_pic = NULL;
-    s->last_pic = NULL;
+
+    memset(&s->cur_pic, 0, sizeof(ERPicture));
+    memset(&s->last_pic, 0, sizeof(ERPicture));
+    memset(&s->next_pic, 0, sizeof(ERPicture));
 }
diff --git a/libavcodec/error_resilience.h b/libavcodec/error_resilience.h
index f979656ab7..5171094a54 100644
--- a/libavcodec/error_resilience.h
+++ b/libavcodec/error_resilience.h
@@ -24,6 +24,7 @@

 #include "avcodec.h"
 #include "dsputil.h"
+#include "thread.h"

 ///< current MB is the first after a resync marker
 #define VP_START 1
@@ -37,6 +38,18 @@
 #define ER_MB_ERROR (ER_AC_ERROR|ER_DC_ERROR|ER_MV_ERROR)
 #define ER_MB_END   (ER_AC_END|ER_DC_END|ER_MV_END)

+typedef struct ERPicture {
+    AVFrame *f;
+    ThreadFrame *tf;
+
+    // it's the caller responsability to allocate these buffers
+    int16_t (*motion_val[2])[2];
+    int8_t *ref_index[2];
+
+    uint32_t *mb_type;
+    int field_picture;
+} ERPicture;
+
 typedef struct ERContext {
     AVCodecContext *avctx;
     DSPContext *dsp;
@@ -55,9 +68,9 @@ typedef struct ERContext {
     uint8_t *mbintra_table;
     int mv[2][4][2];

-    struct Picture *cur_pic;
-    struct Picture *last_pic;
-    struct Picture *next_pic;
+    ERPicture cur_pic;
+    ERPicture last_pic;
+    ERPicture next_pic;

     uint16_t pp_time;
     uint16_t pb_time;
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 1ce772fa3f..a6724846cc 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -2974,9 +2974,11 @@ static int field_end(H264Context *h, int in_setup)
      * causes problems for the first MB line, too.
      */
     if (CONFIG_ERROR_RESILIENCE && !FIELD_PICTURE(h)) {
-        h->er.cur_pic = h->cur_pic_ptr;
-        h->er.last_pic = h->ref_count[0] ? &h->ref_list[0][0] : NULL;
-        h->er.next_pic = h->ref_count[1] ? &h->ref_list[1][0] : NULL;
+        ff_mpeg_set_erpic(&h->er.cur_pic, h->cur_pic_ptr);
+        ff_mpeg_set_erpic(&h->er.last_pic,
+                          h->ref_count[0] ? &h->ref_list[0][0] : NULL);
+        ff_mpeg_set_erpic(&h->er.next_pic,
+                          h->ref_count[1] ? &h->ref_list[1][0] : NULL);
         ff_er_frame_end(&h->er);
     }
     emms_c();
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 59c64b8e95..46754ddc2b 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -2482,13 +2482,32 @@ void ff_MPV_report_decode_progress(MpegEncContext *s)
 }

 #if CONFIG_ERROR_RESILIENCE
+void ff_mpeg_set_erpic(ERPicture *dst, Picture *src)
+{
+    int i;
+
+    if (!src)
+        return;
+
+    dst->f = &src->f;
+    dst->tf = &src->tf;
+
+    for (i = 0; i < 2; i++) {
+        dst->motion_val[i] = src->motion_val[i];
+        dst->ref_index[i] = src->ref_index[i];
+    }
+
+    dst->mb_type = src->mb_type;
+    dst->field_picture = src->field_picture;
+}
+
 void ff_mpeg_er_frame_start(MpegEncContext *s)
 {
     ERContext *er = &s->er;

-    er->cur_pic = s->current_picture_ptr;
-    er->last_pic = s->last_picture_ptr;
-    er->next_pic = s->next_picture_ptr;
+    ff_mpeg_set_erpic(&er->cur_pic, s->current_picture_ptr);
+    ff_mpeg_set_erpic(&er->next_pic, s->next_picture_ptr);
+    ff_mpeg_set_erpic(&er->last_pic, s->last_picture_ptr);

     er->pp_time = s->pp_time;
     er->pb_time = s->pb_time;
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 6b924f560a..95987fc5d2 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -799,7 +799,9 @@ void ff_MPV_report_decode_progress(MpegEncContext *s);
 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
 void ff_set_qscale(MpegEncContext * s, int qscale);
+/* Error resilience */
 void ff_mpeg_er_frame_start(MpegEncContext *s);
+void ff_mpeg_set_erpic(ERPicture *dst, Picture *src);

 int ff_dct_common_init(MpegEncContext *s);
 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
                        uint16_t (*qmat16)[2][64],