author     Michael Niedermayer <michaelni@gmx.at>    2015-06-08 20:58:13 +0200
committer  Michael Niedermayer <michaelni@gmx.at>    2015-06-08 21:05:12 +0200
commit     db8ae37a783aba18d8f869dae1824a3e3f984bf8
tree       280cfe9fdd15922660e34052e0dce0e93ab335c5
parent     34d278f9838e355b3b2c7a9c0f77d7fcaf37ce49
parent     da0c8664b4dc906696803685f7e53ade68594ab8
Merge commit 'da0c8664b4dc906696803685f7e53ade68594ab8'
* commit 'da0c8664b4dc906696803685f7e53ade68594ab8':
mpegvideo: Move various temporary buffers to a separate context
Conflicts:
libavcodec/mpegvideo.c
libavcodec/mpegvideo_enc.c
libavcodec/mpegvideo_motion.c
libavcodec/rv34.c
libavcodec/vc1_mc.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
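
The gist of the merged change, before the full diff: the loose scratch-buffer pointers in MpegEncContext are grouped into a new ScratchpadContext reached through an `sc` member, so call sites switch from s->edge_emu_buffer to s->sc.edge_emu_buffer. A minimal C sketch of that pattern follows; the ScratchpadContext fields are taken from the patch itself, while the trimmed-down DemoEncContext struct and the pick_edge_buffer() helper are illustrative stand-ins, not real FFmpeg code.

    /* Minimal sketch of the buffer regrouping done by this merge.
     * ScratchpadContext matches the patch below; DemoEncContext and
     * pick_edge_buffer() are hypothetical stand-ins for illustration. */
    #include <stdint.h>

    typedef struct ScratchpadContext {
        uint8_t *edge_emu_buffer;  /* temporary buffer when MVs point outside the frame */
        uint8_t *rd_scratchpad;    /* scratchpad for rate-distortion MB decision */
        uint8_t *obmc_scratchpad;
        uint8_t *b_scratchpad;     /* scratchpad for writing into write-only buffers */
    } ScratchpadContext;

    typedef struct DemoEncContext {
        /* before: the four pointers above sat directly in the context */
        ScratchpadContext sc;      /* after: grouped behind one member */
    } DemoEncContext;

    static uint8_t *pick_edge_buffer(DemoEncContext *s)
    {
        /* call sites change from s->edge_emu_buffer to s->sc.edge_emu_buffer */
        return s->sc.edge_emu_buffer;
    }

Keeping the buffers in one sub-context lets setup and teardown paths such as frame_size_alloc() and free_duplicate_context() in the hunks below handle them as a unit; most of the edits in this merge are the corresponding mechanical renames.
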
-rw-r--r--  libavcodec/intrax8.c           |  4
-rw-r--r--  libavcodec/mpegvideo.c         | 67
-rw-r--r--  libavcodec/mpegvideo.h         | 13
-rw-r--r--  libavcodec/mpegvideo_enc.c     | 14
-rw-r--r--  libavcodec/mpegvideo_motion.c  | 42
-rw-r--r--  libavcodec/rv34.c              |  6
-rw-r--r--  libavcodec/snow.c              |  2
-rw-r--r--  libavcodec/snowenc.c           |  8
-rw-r--r--  libavcodec/vc1_mc.c            | 40
-rw-r--r--  libavcodec/wmv2.c              | 12
10 files changed, 107 insertions, 101 deletions
diff --git a/libavcodec/intrax8.c b/libavcodec/intrax8.c
index 017536d644..cf01289fdf 100644
--- a/libavcodec/intrax8.c
+++ b/libavcodec/intrax8.c
@@ -303,7 +303,7 @@ static int x8_setup_spatial_predictor(IntraX8Context * const w, const int chroma
     int sum;
     int quant;
 
-    w->dsp.setup_spatial_compensation(s->dest[chroma], s->edge_emu_buffer,
+    w->dsp.setup_spatial_compensation(s->dest[chroma], s->sc.edge_emu_buffer,
            s->current_picture.f->linesize[chroma>0],
            &range, &sum, w->edges);
     if(chroma){
@@ -639,7 +639,7 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
         if(w->flat_dc){
             dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.f->linesize[!!chroma]);
         }else{
-            w->dsp.spatial_compensation[w->orient]( s->edge_emu_buffer,
+            w->dsp.spatial_compensation[w->orient]( s->sc.edge_emu_buffer,
                                     s->dest[chroma], s->current_picture.f->linesize[!!chroma] );
         }
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index e6e53268d8..6af7ad5d75 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -364,6 +364,7 @@ av_cold void ff_mpv_idct_init(MpegEncContext *s)
 static int frame_size_alloc(MpegEncContext *s, int linesize)
 {
     int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
+    ScratchpadContext *sc = &s->sc;
 
     if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
         return 0;
@@ -379,19 +380,19 @@
     // at uvlinesize. It supports only YUV420 so 24x24 is enough
     // linesize * interlaced * MBsize
     // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
-    FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size, 4 * 68,
+    FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->sc.edge_emu_buffer, alloc_size, 4 * 68,
                             fail);
     FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size, 4 * 16 * 2, fail)
-    s->me.temp         = s->me.scratchpad;
-    s->rd_scratchpad   = s->me.scratchpad;
-    s->b_scratchpad    = s->me.scratchpad;
-    s->obmc_scratchpad = s->me.scratchpad + 16;
+    s->me.temp            = s->me.scratchpad;
+    sc->rd_scratchpad     = s->me.scratchpad;
+    sc->b_scratchpad      = s->me.scratchpad;
+    sc->obmc_scratchpad   = s->me.scratchpad + 16;
 
     return 0;
 fail:
-    av_freep(&s->edge_emu_buffer);
+    av_freep(&sc->edge_emu_buffer);
     return AVERROR(ENOMEM);
 }
@@ -466,7 +467,7 @@
         return -1;
     }
 
-    if (!s->edge_emu_buffer &&
+    if (!s->sc.edge_emu_buffer &&
         (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
         av_log(s->avctx, AV_LOG_ERROR,
                "get_buffer() failed to allocate context scratch buffers.\n");
@@ -740,12 +741,12 @@ static int init_duplicate_context(MpegEncContext *s)
     if (s->mb_height & 1)
         yc_size += 2*s->b8_stride + 2*s->mb_stride;
 
-    s->edge_emu_buffer =
+    s->sc.edge_emu_buffer =
     s->me.scratchpad   =
     s->me.temp         =
-    s->rd_scratchpad   =
-    s->b_scratchpad    =
-    s->obmc_scratchpad = NULL;
+    s->sc.rd_scratchpad   =
+    s->sc.b_scratchpad    =
+    s->sc.obmc_scratchpad = NULL;
 
     if (s->encoding) {
         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
@@ -787,12 +788,12 @@ static void free_duplicate_context(MpegEncContext *s)
     if (!s)
         return;
 
-    av_freep(&s->edge_emu_buffer);
+    av_freep(&s->sc.edge_emu_buffer);
     av_freep(&s->me.scratchpad);
     s->me.temp =
-    s->rd_scratchpad =
-    s->b_scratchpad =
-    s->obmc_scratchpad = NULL;
+    s->sc.rd_scratchpad =
+    s->sc.b_scratchpad =
+    s->sc.obmc_scratchpad = NULL;
 
     av_freep(&s->dct_error_sum);
     av_freep(&s->me.map);
@@ -805,12 +806,12 @@ static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
 {
 #define COPY(a) bak->a = src->a
-    COPY(edge_emu_buffer);
+    COPY(sc.edge_emu_buffer);
     COPY(me.scratchpad);
     COPY(me.temp);
-    COPY(rd_scratchpad);
-    COPY(b_scratchpad);
-    COPY(obmc_scratchpad);
+    COPY(sc.rd_scratchpad);
+    COPY(sc.b_scratchpad);
+    COPY(sc.obmc_scratchpad);
     COPY(me.map);
     COPY(me.score_map);
     COPY(blocks);
@@ -845,7 +846,7 @@ int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
         // exchange uv
         FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
     }
-    if (!dst->edge_emu_buffer &&
+    if (!dst->sc.edge_emu_buffer &&
         (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
         av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
                                          "scratch buffers.\n");
@@ -975,7 +976,7 @@ do {\
     }
 
     // linesize dependend scratch buffer allocation
-    if (!s->edge_emu_buffer)
+    if (!s->sc.edge_emu_buffer)
         if (s1->linesize) {
             if (frame_size_alloc(s, s1->linesize) < 0) {
                 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
@@ -2393,12 +2394,12 @@ static inline int hpel_motion_lowres(MpegEncContext *s,
     if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
         (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                  s->linesize, s->linesize,
                                  w + 1, (h + 1) << field_based,
                                  src_x, src_y << field_based,
                                  h_edge_pos, v_edge_pos);
-        src = s->edge_emu_buffer;
+        src = s->sc.edge_emu_buffer;
         emu = 1;
     }
@@ -2495,14 +2496,14 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
     if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
         (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                  linesize >> field_based, linesize >> field_based,
                                  17, 17 + field_based,
                                  src_x, src_y << field_based, h_edge_pos,
                                  v_edge_pos);
-        ptr_y = s->edge_emu_buffer;
+        ptr_y = s->sc.edge_emu_buffer;
         if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
-            uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
+            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
             uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
             s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                      uvlinesize >> field_based, uvlinesize >> field_based,
@@ -2583,11 +2584,11 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
     ptr = ref_picture[1] + offset;
     if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
         (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                  s->uvlinesize, s->uvlinesize,
                                  9, 9,
                                  src_x, src_y, h_edge_pos, v_edge_pos);
-        ptr = s->edge_emu_buffer;
+        ptr = s->sc.edge_emu_buffer;
         emu = 1;
     }
     sx = (sx << 2) >> lowres;
@@ -2596,11 +2597,11 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
     ptr = ref_picture[2] + offset;
     if (emu) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                  s->uvlinesize, s->uvlinesize,
                                  9, 9,
                                  src_x, src_y, h_edge_pos, v_edge_pos);
-        ptr = s->edge_emu_buffer;
+        ptr = s->sc.edge_emu_buffer;
     }
     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
 }
@@ -2932,9 +2933,9 @@ void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
             dest_cb= s->dest[1];
             dest_cr= s->dest[2];
         }else{
-            dest_y = s->b_scratchpad;
-            dest_cb= s->b_scratchpad+16*linesize;
-            dest_cr= s->b_scratchpad+32*linesize;
+            dest_y = s->sc.b_scratchpad;
+            dest_cb= s->sc.b_scratchpad+16*linesize;
+            dest_cr= s->sc.b_scratchpad+32*linesize;
         }
 
         if (!s->mb_intra) {
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index c72f035550..9f15e91861 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -143,6 +143,13 @@ typedef struct Picture{
     uint64_t error[AV_NUM_DATA_POINTERS];
 } Picture;
 
+typedef struct ScratchpadContext {
+    uint8_t *edge_emu_buffer;     ///< temporary buffer for if MVs point to out-of-frame data
+    uint8_t *rd_scratchpad;       ///< scratchpad for rate distortion mb decision
+    uint8_t *obmc_scratchpad;
+    uint8_t *b_scratchpad;        ///< scratchpad used for writing into write only buffers
+} ScratchpadContext;
+
 /**
  * MpegEncContext.
  */
@@ -266,10 +273,8 @@ typedef struct MpegEncContext {
     uint8_t *mbintra_table;       ///< used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
     uint8_t *cbp_table;           ///< used to store cbp, ac_pred for partitioned decoding
     uint8_t *pred_dir_table;      ///< used to store pred_dir for partitioned decoding
-    uint8_t *edge_emu_buffer;     ///< temporary buffer for if MVs point to out-of-frame data
-    uint8_t *rd_scratchpad;       ///< scratchpad for rate distortion mb decision
-    uint8_t *obmc_scratchpad;
-    uint8_t *b_scratchpad;        ///< scratchpad used for writing into write only buffers
+
+    ScratchpadContext sc;
 
     int qscale;                   ///< QP
     int chroma_qscale;            ///< chroma QP
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index d7279c15d3..80b33618c7 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -2100,7 +2100,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
                 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
 
     if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
-        uint8_t *ebuf = s->edge_emu_buffer + 36 * wrap_y;
+        uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
         int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
         int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
         s->vdsp.emulated_edge_mc(ebuf, ptr_y,
@@ -2512,9 +2512,9 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
 
     if(*next_block){
        memcpy(dest_backup, s->dest, sizeof(s->dest));
-        s->dest[0] = s->rd_scratchpad;
-        s->dest[1] = s->rd_scratchpad + 16*s->linesize;
-        s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
+        s->dest[0] = s->sc.rd_scratchpad;
+        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
+        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
         av_assert0(s->linesize >= 32); //FIXME
     }
@@ -3213,9 +3213,9 @@ static int encode_thread(AVCodecContext *c, void *arg){
                     ff_h263_update_motion_val(s);
 
                     if(next_block==0){ //FIXME 16 vs linesize16
-                        s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
-                        s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
-                        s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
+                        s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
+                        s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
+                        s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
                     }
 
                     if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
diff --git a/libavcodec/mpegvideo_motion.c b/libavcodec/mpegvideo_motion.c
index 060f3f25b1..f1956f0e73 100644
--- a/libavcodec/mpegvideo_motion.c
+++ b/libavcodec/mpegvideo_motion.c
@@ -64,12 +64,12 @@ static void gmc1_motion(MpegEncContext *s,
 
     if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
         (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                  linesize, linesize,
                                  17, 17,
                                  src_x, src_y,
                                  s->h_edge_pos, s->v_edge_pos);
-        ptr = s->edge_emu_buffer;
+        ptr = s->sc.edge_emu_buffer;
     }
 
     if ((motion_x | motion_y) & 7) {
@@ -108,12 +108,12 @@ static void gmc1_motion(MpegEncContext *s,
     ptr = ref_picture[1] + offset;
     if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
         (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                  uvlinesize, uvlinesize,
                                  9, 9,
                                  src_x, src_y,
                                  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-        ptr = s->edge_emu_buffer;
+        ptr = s->sc.edge_emu_buffer;
         emu = 1;
     }
     s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
@@ -121,12 +121,12 @@ static void gmc1_motion(MpegEncContext *s,
     ptr = ref_picture[2] + offset;
     if (emu) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                  uvlinesize, uvlinesize,
                                  9, 9,
                                  src_x, src_y,
                                  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-        ptr = s->edge_emu_buffer;
+        ptr = s->sc.edge_emu_buffer;
     }
     s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
@@ -213,12 +213,12 @@ static inline int hpel_motion(MpegEncContext *s,
     if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) ||
         (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                  s->linesize, s->linesize,
                                  9, 9,
                                  src_x, src_y,
                                  s->h_edge_pos, s->v_edge_pos);
-        src = s->edge_emu_buffer;
+        src = s->sc.edge_emu_buffer;
         emu = 1;
     }
     pix_op[dxy](dest, src, s->linesize, 8);
@@ -318,14 +318,14 @@ void mpeg_motion_internal(MpegEncContext *s,
             return;
         }
         src_y = (unsigned)src_y << field_based;
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                  s->linesize, s->linesize,
                                  17, 17 + field_based,
                                  src_x, src_y,
                                  s->h_edge_pos, s->v_edge_pos);
-        ptr_y = s->edge_emu_buffer;
+        ptr_y = s->sc.edge_emu_buffer;
         if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
-            uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
+            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
             uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
             uvsrc_y = (unsigned)uvsrc_y << field_based;
             s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
@@ -478,7 +478,7 @@ static inline void obmc_motion(MpegEncContext *s,
         if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
             ptr[i] = ptr[MID];
         } else {
-            ptr[i] = s->obmc_scratchpad + 8 * (i & 1) +
+            ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) +
                      s->linesize * 8 * (i >> 1);
             hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
                         mv[i][0], mv[i][1]);
@@ -541,14 +541,14 @@ static inline void qpel_motion(MpegEncContext *s,
     if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15 , 0) ||
         (unsigned)src_y >= FFMAX( v_edge_pos - (motion_y & 3) - h + 1, 0)) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                  s->linesize, s->linesize,
                                  17, 17 + field_based,
                                  src_x, src_y << field_based,
                                  s->h_edge_pos, s->v_edge_pos);
-        ptr_y = s->edge_emu_buffer;
+        ptr_y = s->sc.edge_emu_buffer;
         if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
-            uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
+            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
             uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
             s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                      s->uvlinesize, s->uvlinesize,
@@ -625,22 +625,22 @@ static void chroma_4mv_motion(MpegEncContext *s,
     ptr = ref_picture[1] + offset;
     if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) ||
         (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                  s->uvlinesize, s->uvlinesize,
                                  9, 9,
                                  src_x, src_y,
                                  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-        ptr = s->edge_emu_buffer;
+        ptr = s->sc.edge_emu_buffer;
         emu = 1;
     }
     pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
 
     ptr = ref_picture[2] + offset;
     if (emu) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                  s->uvlinesize, s->uvlinesize,
                                  9, 9,
                                  src_x, src_y,
                                  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-        ptr = s->edge_emu_buffer;
+        ptr = s->sc.edge_emu_buffer;
     }
     pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
 }
@@ -783,13 +783,13 @@ static inline void apply_8x8(MpegEncContext *s,
                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
                 if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
                     (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
-                    s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+                    s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                              s->linesize, s->linesize,
                                              9, 9,
                                              src_x, src_y,
                                              s->h_edge_pos, s->v_edge_pos);
-                    ptr = s->edge_emu_buffer;
+                    ptr = s->sc.edge_emu_buffer;
                 }
                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
                 qpix_op[1][dxy](dest, ptr, s->linesize);
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index a232ab2593..c109a9e003 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -726,12 +726,12 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
        (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
        (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
         srcY -= 2 + 2*s->linesize;
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
                                  s->linesize, s->linesize,
                                  (width << 3) + 6, (height << 3) + 6,
                                  src_x - 2, src_y - 2,
                                  s->h_edge_pos, s->v_edge_pos);
-        srcY = s->edge_emu_buffer + 2 + 2*s->linesize;
+        srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
         emu = 1;
     }
     if(!weighted){
@@ -756,7 +756,7 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
     is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
     qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
     if (emu) {
-        uint8_t *uvbuf = s->edge_emu_buffer;
+        uint8_t *uvbuf = s->sc.edge_emu_buffer;
 
         s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                  s->uvlinesize, s->uvlinesize,
diff --git a/libavcodec/snow.c b/libavcodec/snow.c
index 5201d57422..fc2e7279f3 100644
--- a/libavcodec/snow.c
+++ b/libavcodec/snow.c
@@ -704,7 +704,7 @@ av_cold void ff_snow_common_end(SnowContext *s)
     av_freep(&s->m.me.scratchpad);
     av_freep(&s->m.me.map);
     av_freep(&s->m.me.score_map);
-    av_freep(&s->m.obmc_scratchpad);
+    av_freep(&s->m.sc.obmc_scratchpad);
 
     av_freep(&s->block);
     av_freep(&s->scratchbuf);
diff --git a/libavcodec/snowenc.c b/libavcodec/snowenc.c
index 5c5cc66505..bd5c0fde15 100644
--- a/libavcodec/snowenc.c
+++ b/libavcodec/snowenc.c
@@ -75,8 +75,8 @@ static av_cold int encode_init(AVCodecContext *avctx)
     s->m.me.scratchpad= av_mallocz_array((avctx->width+64), 2*16*2*sizeof(uint8_t));
     s->m.me.map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
     s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
-    s->m.obmc_scratchpad= av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t));
-    if (!s->m.me.scratchpad || !s->m.me.map || !s->m.me.score_map || !s->m.obmc_scratchpad)
+    s->m.sc.obmc_scratchpad= av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t));
+    if (!s->m.me.scratchpad || !s->m.me.map || !s->m.me.score_map || !s->m.sc.obmc_scratchpad)
         return AVERROR(ENOMEM);
 
     ff_h263_encode_init(&s->m); //mv_penalty
@@ -501,7 +501,7 @@ static int get_dc(SnowContext *s, int mb_x, int mb_y, int plane_index){
     const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
     const int ref_stride= s->current_picture->linesize[plane_index];
     uint8_t *src= s-> input_picture->data[plane_index];
-    IDWTELEM *dst= (IDWTELEM*)s->m.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
+    IDWTELEM *dst= (IDWTELEM*)s->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
     const int b_stride = s->b_width << s->block_max_depth;
     const int w= p->width;
     const int h= p->height;
@@ -596,7 +596,7 @@ static int get_block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index, uin
     const int ref_stride= s->current_picture->linesize[plane_index];
     uint8_t *dst= s->current_picture->data[plane_index];
     uint8_t *src= s-> input_picture->data[plane_index];
-    IDWTELEM *pred= (IDWTELEM*)s->m.obmc_scratchpad + plane_index*block_size*block_size*4;
+    IDWTELEM *pred= (IDWTELEM*)s->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4;
     uint8_t *cur = s->scratchbuf;
     uint8_t *tmp = s->emu_edge_buffer;
     const int b_stride = s->b_width << s->block_max_depth;
diff --git a/libavcodec/vc1_mc.c b/libavcodec/vc1_mc.c
index 1a78c178db..535824339e 100644
--- a/libavcodec/vc1_mc.c
+++ b/libavcodec/vc1_mc.c
@@ -271,25 +271,25 @@ void ff_vc1_mc_1mv(VC1Context *v, int dir)
     /* for grayscale we should not try to read from unknown area */
     if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY) {
-        srcU = s->edge_emu_buffer + 18 * s->linesize;
-        srcV = s->edge_emu_buffer + 18 * s->linesize;
+        srcU = s->sc.edge_emu_buffer + 18 * s->linesize;
+        srcV = s->sc.edge_emu_buffer + 18 * s->linesize;
     }
 
     if (v->rangeredfrm || use_ic || s->h_edge_pos < 22 || v_edge_pos < 22 ||
         (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3 ||
         (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
-        uint8_t *ubuf = s->edge_emu_buffer + 19 * s->linesize;
+        uint8_t *ubuf = s->sc.edge_emu_buffer + 19 * s->linesize;
         uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
         const int k = 17 + s->mspel * 2;
 
         srcY -= s->mspel * (1 + s->linesize);
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
                                  s->linesize, s->linesize,
                                  k, k,
                                  src_x - s->mspel, src_y - s->mspel,
                                  s->h_edge_pos, v_edge_pos);
-        srcY = s->edge_emu_buffer;
+        srcY = s->sc.edge_emu_buffer;
         s->vdsp.emulated_edge_mc(ubuf, srcU,
                                  s->uvlinesize, s->uvlinesize,
                                  8 + 1, 8 + 1,
@@ -467,12 +467,12 @@ void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
         srcY -= s->mspel * (1 + (s->linesize << fieldmv));
         /* check emulate edge stride and offset */
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
                                  s->linesize, s->linesize,
                                  k, k << fieldmv,
                                  src_x - s->mspel, src_y - (s->mspel << fieldmv),
                                  s->h_edge_pos, v_edge_pos);
-        srcY = s->edge_emu_buffer;
+        srcY = s->sc.edge_emu_buffer;
         /* if we deal with range reduction we need to scale source blocks */
         if (v->rangeredfrm) {
             vc1_scale_luma(srcY, k, s->linesize << fieldmv);
@@ -602,16 +602,16 @@ void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
         || s->h_edge_pos < 18 || v_edge_pos < 18
         || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
         || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcU,
                                  s->uvlinesize, s->uvlinesize,
                                  8 + 1, 8 + 1,
                                  uvsrc_x, uvsrc_y,
                                  s->h_edge_pos >> 1, v_edge_pos >> 1);
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16, srcV,
                                  s->uvlinesize, s->uvlinesize,
                                  8 + 1, 8 + 1,
                                  uvsrc_x, uvsrc_y,
                                  s->h_edge_pos >> 1, v_edge_pos >> 1);
-        srcU = s->edge_emu_buffer;
-        srcV = s->edge_emu_buffer + 16;
+        srcU = s->sc.edge_emu_buffer;
+        srcV = s->sc.edge_emu_buffer + 16;
 
         /* if we deal with range reduction we need to scale source blocks */
         if (v->rangeredfrm) {
@@ -704,16 +704,16 @@ void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
            || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
           || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
           || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
-            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
+            s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcU,
                                      s->uvlinesize, s->uvlinesize,
                                      5, (5 << fieldmv),
                                      uvsrc_x, uvsrc_y,
                                      s->h_edge_pos >> 1, v_edge_pos);
-            s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
+            s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16, srcV,
                                      s->uvlinesize, s->uvlinesize,
                                      5, (5 << fieldmv),
                                      uvsrc_x, uvsrc_y,
                                      s->h_edge_pos >> 1, v_edge_pos);
-            srcU = s->edge_emu_buffer;
-            srcV = s->edge_emu_buffer + 16;
+            srcU = s->sc.edge_emu_buffer;
+            srcV = s->sc.edge_emu_buffer + 16;
 
             /* if we deal with intensity compensation we need to scale source blocks */
             if (use_ic) {
@@ -802,24 +802,24 @@ void ff_vc1_interp_mc(VC1Context *v)
     /* for grayscale we should not try to read from unknown area */
     if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY) {
-        srcU = s->edge_emu_buffer + 18 * s->linesize;
-        srcV = s->edge_emu_buffer + 18 * s->linesize;
+        srcU = s->sc.edge_emu_buffer + 18 * s->linesize;
+        srcV = s->sc.edge_emu_buffer + 18 * s->linesize;
     }
 
     if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
         || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
        || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
-        uint8_t *ubuf = s->edge_emu_buffer + 19 * s->linesize;
+        uint8_t *ubuf = s->sc.edge_emu_buffer + 19 * s->linesize;
         uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
         const int k = 17 + s->mspel * 2;
 
         srcY -= s->mspel * (1 + s->linesize);
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
                                  s->linesize, s->linesize,
                                  k, k,
                                  src_x - s->mspel, src_y - s->mspel,
                                  s->h_edge_pos, v_edge_pos);
-        srcY = s->edge_emu_buffer;
+        srcY = s->sc.edge_emu_buffer;
         s->vdsp.emulated_edge_mc(ubuf, srcU,
                                  s->uvlinesize, s->uvlinesize,
                                  8 + 1, 8 + 1,
diff --git a/libavcodec/wmv2.c b/libavcodec/wmv2.c
index 1cfb7eb0e4..963bda05ec 100644
--- a/libavcodec/wmv2.c
+++ b/libavcodec/wmv2.c
@@ -127,11 +127,11 @@ void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y,
 
     if (src_x < 1 || src_y < 1 || src_x + 17 >= s->h_edge_pos ||
         src_y + h + 1 >= v_edge_pos) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr - 1 - s->linesize,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr - 1 - s->linesize,
                                  s->linesize, s->linesize, 19, 19,
                                  src_x - 1, src_y - 1,
                                  s->h_edge_pos, s->v_edge_pos);
-        ptr = s->edge_emu_buffer + 1 + s->linesize;
+        ptr = s->sc.edge_emu_buffer + 1 + s->linesize;
         emu = 1;
     }
@@ -162,23 +162,23 @@ void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y,
         offset = (src_y * uvlinesize) + src_x;
         ptr = ref_picture[1] + offset;
         if (emu) {
-            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+            s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                      s->uvlinesize, s->uvlinesize,
                                      9, 9,
                                      src_x, src_y,
                                      s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-            ptr = s->edge_emu_buffer;
+            ptr = s->sc.edge_emu_buffer;
         }
         pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1);
 
         ptr = ref_picture[2] + offset;
         if (emu) {
-            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+            s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                      s->uvlinesize, s->uvlinesize,
                                      9, 9,
                                      src_x, src_y,
                                      s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-            ptr = s->edge_emu_buffer;
+            ptr = s->sc.edge_emu_buffer;
         }
         pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1);
     }