author     Clément Bœsch <u@pkh.me>             2014-03-28 22:33:51 +0100
committer  Anton Khirnov <anton@khirnov.net>    2016-08-03 10:57:55 +0200
commit     3cda179f180e48cda9afee9b1875f10e89a848a6 (patch)
tree       886128e7bb1ad6bcd97a653312f4d0e4b33b5ec3 /libavcodec
parent     8be8444d01d850b7ff2363f6886bfa8a8ea4a449 (diff)
download   ffmpeg-3cda179f180e48cda9afee9b1875f10e89a848a6.tar.gz
vp9mc/x86: rename ff_* to ff_vp9_*
Signed-off-by: Anton Khirnov <anton@khirnov.net>
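The rename moves the x86 VP9 motion-compensation symbols into a vp9_ namespace so they cannot collide with identically named DSP functions from other codecs. As a minimal sketch (not part of the patch itself; the surrounding #includes and the usage line are added here purely for illustration), the renamed fpel_func() prototype macro now pastes "vp9_" into the symbol it declares:

    /* Sketch: with the patch applied, fpel_func(put, 8, mmx) declares
     * ff_vp9_put8_mmx() instead of ff_put8_mmx(). The matching definition
     * comes from vp9mc.asm, where the cglobal label gains the same vp9_
     * prefix (cglobal itself prepends the shared ff_ prefix and appends
     * the instruction-set suffix). */
    #include <stdint.h>
    #include <stddef.h>

    #define fpel_func(avg, sz, opt)                                         \
    void ff_vp9_ ## avg ## sz ## _ ## opt(uint8_t *dst, const uint8_t *src, \
                                          ptrdiff_t dst_stride,             \
                                          ptrdiff_t src_stride,             \
                                          int h, int mx, int my)

    fpel_func(put, 8, mmx);   /* expands to: void ff_vp9_put8_mmx(...); */

On the assembly side the change is mechanical: each cglobal label simply gains the vp9_ prefix, as the diff below shows.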
Diffstat (limited to 'libavcodec')
-rw-r--r--   libavcodec/x86/vp9dsp_init.c | 94
-rw-r--r--   libavcodec/x86/vp9mc.asm     | 14
2 files changed, 54 insertions, 54 deletions
diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c
index 2b94af3480..13662961af 100644
--- a/libavcodec/x86/vp9dsp_init.c
+++ b/libavcodec/x86/vp9dsp_init.c
@@ -29,11 +29,11 @@
 
 #if HAVE_YASM
 
-#define fpel_func(avg, sz, opt) \
-void ff_ ## avg ## sz ## _ ## opt(uint8_t *dst, const uint8_t *src, \
-                                  ptrdiff_t dst_stride, \
-                                  ptrdiff_t src_stride, \
-                                  int h, int mx, int my)
+#define fpel_func(avg, sz, opt) \
+void ff_vp9_ ## avg ## sz ## _ ## opt(uint8_t *dst, const uint8_t *src, \
+                                      ptrdiff_t dst_stride, \
+                                      ptrdiff_t src_stride, \
+                                      int h, int mx, int my)
 
 fpel_func(put, 4, mmx);
 fpel_func(put, 8, mmx);
@@ -47,14 +47,14 @@ fpel_func(avg, 32, sse2);
 fpel_func(avg, 64, sse2);
 #undef fpel_func
 
-#define mc_func(avg, sz, dir, opt) \
-void \
-ff_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \
-                                                      const uint8_t *src, \
-                                                      ptrdiff_t dst_stride, \
-                                                      ptrdiff_t src_stride, \
-                                                      int h, \
-                                                      const int8_t (*filter)[16])
+#define mc_func(avg, sz, dir, opt) \
+void \
+ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \
+                                                          const uint8_t *src, \
+                                                          ptrdiff_t dst_stride, \
+                                                          ptrdiff_t src_stride, \
+                                                          int h, \
+                                                          const int8_t (*filter)[16])
 
 #define mc_funcs(sz) \
     mc_func(put, sz, h, ssse3); \
@@ -73,19 +73,19 @@ mc_funcs(16);
 
 #define mc_rep_func(avg, sz, hsz, dir, opt) \
 static av_always_inline void \
-ff_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \
+ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \
                                                       const uint8_t *src, \
                                                       ptrdiff_t dst_stride, \
                                                       ptrdiff_t src_stride, \
                                                       int h, \
                                                       const int8_t (*filter)[16]) \
 { \
-    ff_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst, src, \
+    ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst, src, \
                                                             dst_stride, \
                                                             src_stride, \
                                                             h, \
                                                             filter); \
-    ff_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst + hsz, \
+    ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst + hsz, \
                                                             src + hsz, \
                                                             dst_stride, \
                                                             src_stride, \
@@ -109,23 +109,23 @@ mc_rep_funcs(64, 32);
 
 extern const int8_t ff_filters_ssse3[3][15][4][16];
 
-#define filter_8tap_2d_fn(op, sz, f, fname) \
-static void \
-op ## _8tap_ ## fname ## _ ## sz ## hv_ssse3(uint8_t *dst, \
-                                             const uint8_t *src, \
-                                             ptrdiff_t dst_stride, \
-                                             ptrdiff_t src_stride, \
-                                             int h, int mx, int my) \
-{ \
-    LOCAL_ALIGNED_16(uint8_t, temp, [71 * 64]); \
-    ff_put_8tap_1d_h_ ## sz ## _ssse3(temp, src - 3 * src_stride, \
-                                      64, src_stride, \
-                                      h + 7, \
-                                      ff_filters_ssse3[f][mx - 1]); \
-    ff_ ## op ## _8tap_1d_v_ ## sz ## _ssse3(dst, temp + 3 * 64, \
-                                             dst_stride, 64, \
-                                             h, \
-                                             ff_filters_ssse3[f][my - 1]); \
+#define filter_8tap_2d_fn(op, sz, f, fname) \
+static void \
+op ## _8tap_ ## fname ## _ ## sz ## hv_ssse3(uint8_t *dst, \
+                                             const uint8_t *src, \
+                                             ptrdiff_t dst_stride, \
+                                             ptrdiff_t src_stride, \
+                                             int h, int mx, int my) \
+{ \
+    LOCAL_ALIGNED_16(uint8_t, temp, [71 * 64]); \
+    ff_vp9_put_8tap_1d_h_ ## sz ## _ssse3(temp, src - 3 * src_stride, \
+                                          64, src_stride, \
+                                          h + 7, \
+                                          ff_filters_ssse3[f][mx - 1]); \
+    ff_vp9_ ## op ## _8tap_1d_v_ ## sz ## _ssse3(dst, temp + 3 * 64, \
+                                                 dst_stride, 64, \
+                                                 h, \
+                                                 ff_filters_ssse3[f][my - 1]); \
 }
 
 #define filters_8tap_2d_fn(op, sz) \
@@ -147,19 +147,19 @@ filters_8tap_2d_fn2(avg)
 #undef filters_8tap_2d_fn
 #undef filter_8tap_2d_fn
 
-#define filter_8tap_1d_fn(op, sz, f, fname, dir, dvar) \
-static void \
-op ## _8tap_ ## fname ## _ ## sz ## dir ## _ssse3(uint8_t *dst, \
-                                                  const uint8_t *src, \
-                                                  ptrdiff_t dst_stride, \
-                                                  ptrdiff_t src_stride, \
-                                                  int h, int mx, \
-                                                  int my) \
-{ \
-    ff_ ## op ## _8tap_1d_ ## dir ## _ ## sz ## _ssse3(dst, src, \
-                                                       dst_stride, \
-                                                       src_stride, h, \
-                                                       ff_filters_ssse3[f][dvar - 1]); \
+#define filter_8tap_1d_fn(op, sz, f, fname, dir, dvar) \
+static void \
+op ## _8tap_ ## fname ## _ ## sz ## dir ## _ssse3(uint8_t *dst, \
+                                                  const uint8_t *src, \
+                                                  ptrdiff_t dst_stride, \
+                                                  ptrdiff_t src_stride, \
+                                                  int h, int mx, \
+                                                  int my) \
+{ \
+    ff_vp9_ ## op ## _8tap_1d_ ## dir ## _ ## sz ## _ssse3(dst, src, \
+                                                           dst_stride, \
+                                                           src_stride, h,\
+                                                           ff_filters_ssse3[f][dvar - 1]); \
 }
 
 #define filters_8tap_1d_fn(op, sz, dir, dvar) \
@@ -197,7 +197,7 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
     dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \
     dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \
     dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][0][0] = \
-    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_ ## type ## sz ## _ ## opt
+    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_vp9_ ## type ## sz ## _ ## opt
 
 #define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type, opt) \
diff --git a/libavcodec/x86/vp9mc.asm b/libavcodec/x86/vp9mc.asm
index 4c747ab1bb..41b22204c0 100644
--- a/libavcodec/x86/vp9mc.asm
+++ b/libavcodec/x86/vp9mc.asm
@@ -86,7 +86,7 @@ SECTION .text
 
 %macro filter_h_fn 1
 %assign %%px mmsize/2
-cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filtery
+cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filtery
     mova        m6, [pw_256]
     mova        m7, [filteryq+ 0]
 %if ARCH_X86_64 && mmsize > 8
@@ -147,7 +147,7 @@ filter_h_fn avg
 %if ARCH_X86_64
 %macro filter_hx2_fn 1
 %assign %%px mmsize
-cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, src, dstride, sstride, h, filtery
+cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, src, dstride, sstride, h, filtery
    mova       m13, [pw_256]
    mova        m8, [filteryq+ 0]
    mova        m9, [filteryq+16]
@@ -203,9 +203,9 @@ filter_hx2_fn avg
 %macro filter_v_fn 1
 %assign %%px mmsize/2
 %if ARCH_X86_64
-cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, src, dstride, sstride, h, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, src, dstride, sstride, h, filtery, src4, sstride3
 %else
-cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery, src4, sstride3
     mov   filteryq, r5mp
 %define hd r4mp
 %endif
@@ -276,7 +276,7 @@ filter_v_fn avg
 
 %macro filter_vx2_fn 1
 %assign %%px mmsize
-cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, src, dstride, sstride, h, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, src, dstride, sstride, h, filtery, src4, sstride3
     mova       m13, [pw_256]
     lea  sstride3q, [sstrideq*3]
     lea      src4q, [srcq+sstrideq]
@@ -346,11 +346,11 @@ filter_vx2_fn avg
 %endif
 
 %if %2 <= 16
-cglobal %1%2, 5, 7, 4, dst, src, dstride, sstride, h, dstride3, sstride3
+cglobal vp9_%1%2, 5, 7, 4, dst, src, dstride, sstride, h, dstride3, sstride3
     lea  sstride3q, [sstrideq*3]
     lea  dstride3q, [dstrideq*3]
 %else
-cglobal %1%2, 5, 5, 4, dst, src, dstride, sstride, h
+cglobal vp9_%1%2, 5, 5, 4, dst, src, dstride, sstride, h
 %endif
 .loop:
     %%srcfn     m0, [srcq]