author     Seppo Tomperi <seppo.tomperi@vtt.fi>       2015-02-25 09:44:55 +0000
committer  Michael Niedermayer <michaelni@gmx.at>     2015-02-25 18:39:51 +0100
commit     63ca0fe8288dbd300c9bb814cb671e5d889f691c (patch)
tree       85890d304a7de05943cece78d86925ac353b19cb /libavcodec/arm
parent     d1e2d59f23a9c69b0b088a9a528b29f20c2948c1 (diff)
download   ffmpeg-63ca0fe8288dbd300c9bb814cb671e5d889f691c.tar.gz
avcodec/hevcdsp: ARM NEON optimized qpel functions
uses comma as macro parameter separator
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
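
GNU as accepts either whitespace or commas between .macro arguments; the note above records that these macros use the comma-separated form. A minimal sketch of that syntax, with a hypothetical macro not taken from the patch:

    .macro store_pair reg1, reg2       // parameters declared with a comma separator
            vst1.32  {\reg1}, [r0], r1 // store first register, advance dst by stride
            vst1.32  {\reg2}, [r0], r1 // store second register
    .endm

            store_pair d0, d1          // invocation likewise uses commas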
Diffstat (limited to 'libavcodec/arm')
-rw-r--r--  libavcodec/arm/Makefile               3
-rw-r--r--  libavcodec/arm/hevcdsp_init_neon.c  159
-rw-r--r--  libavcodec/arm/hevcdsp_qpel_neon.S  999
3 files changed, 1160 insertions(+), 1 deletion(-)
diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
index 240ee3f563..d1b4826af4 100644
--- a/libavcodec/arm/Makefile
+++ b/libavcodec/arm/Makefile
@@ -138,7 +138,8 @@ NEON-OBJS-$(CONFIG_DCA_DECODER)        += arm/dcadsp_neon.o             \
                                           arm/synth_filter_neon.o
 NEON-OBJS-$(CONFIG_HEVC_DECODER)       += arm/hevcdsp_init_neon.o       \
                                           arm/hevcdsp_deblock_neon.o    \
-                                          arm/hevcdsp_idct_neon.o
+                                          arm/hevcdsp_idct_neon.o       \
+                                          arm/hevcdsp_qpel_neon.o
 NEON-OBJS-$(CONFIG_RV30_DECODER)       += arm/rv34dsp_neon.o
 NEON-OBJS-$(CONFIG_RV40_DECODER)       += arm/rv34dsp_neon.o            \
                                           arm/rv40dsp_neon.o
diff --git a/libavcodec/arm/hevcdsp_init_neon.c b/libavcodec/arm/hevcdsp_init_neon.c
index 1f132db18d..61e6462cbd 100644
--- a/libavcodec/arm/hevcdsp_init_neon.c
+++ b/libavcodec/arm/hevcdsp_init_neon.c
@@ -42,10 +42,110 @@ void ff_hevc_transform_add_16x16_neon_8(uint8_t *_dst, int16_t *coeffs,
 void ff_hevc_transform_add_32x32_neon_8(uint8_t *_dst, int16_t *coeffs,
                                         ptrdiff_t stride);
 
+#define PUT_PIXELS(name) \
+    void name(int16_t *dst, uint8_t *src, \
+              ptrdiff_t srcstride, int height, \
+              intptr_t mx, intptr_t my, int width)
+PUT_PIXELS(ff_hevc_put_pixels_w2_neon_8);
+PUT_PIXELS(ff_hevc_put_pixels_w4_neon_8);
+PUT_PIXELS(ff_hevc_put_pixels_w6_neon_8);
+PUT_PIXELS(ff_hevc_put_pixels_w8_neon_8);
+PUT_PIXELS(ff_hevc_put_pixels_w12_neon_8);
+PUT_PIXELS(ff_hevc_put_pixels_w16_neon_8);
+PUT_PIXELS(ff_hevc_put_pixels_w24_neon_8);
+PUT_PIXELS(ff_hevc_put_pixels_w32_neon_8);
+PUT_PIXELS(ff_hevc_put_pixels_w48_neon_8);
+PUT_PIXELS(ff_hevc_put_pixels_w64_neon_8);
+#undef PUT_PIXELS
+
+static void (*put_hevc_qpel_neon[4][4])(int16_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
+                                        int height, int width);
+static void (*put_hevc_qpel_uw_neon[4][4])(uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                           int width, int height, int16_t* src2, ptrdiff_t src2stride);
+void ff_hevc_put_qpel_neon_wrapper(int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
+                                   int height, intptr_t mx, intptr_t my, int width);
+void ff_hevc_put_qpel_uni_neon_wrapper(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
+                                       int height, intptr_t mx, intptr_t my, int width);
+void ff_hevc_put_qpel_bi_neon_wrapper(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
+                                      int16_t *src2,
+                                      int height, intptr_t mx, intptr_t my, int width);
+#define QPEL_FUNC(name) \
+    void name(int16_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, \
+              int height, int width)
+
+QPEL_FUNC(ff_hevc_put_qpel_v1_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_v2_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_v3_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h1_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h2_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h3_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h1v1_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h1v2_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h1v3_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h2v1_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h2v2_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h2v3_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h3v1_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h3v2_neon_8);
+QPEL_FUNC(ff_hevc_put_qpel_h3v3_neon_8);
+#undef QPEL_FUNC
+
+#define QPEL_FUNC_UW_PIX(name) \
+    void name(uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride, \
+              int height, intptr_t mx, intptr_t my, int width);
+QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w4_neon_8);
+QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w8_neon_8);
+QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w16_neon_8);
+QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w24_neon_8);
+QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w32_neon_8);
+QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w48_neon_8);
+QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w64_neon_8);
+#undef QPEL_FUNC_UW_PIX
+
+#define QPEL_FUNC_UW(name) \
+    void name(uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride, \
+              int width, int height, int16_t* src2, ptrdiff_t src2stride);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_pixels_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_v1_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_v2_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_v3_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h1_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h2_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h3_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h1v1_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h1v2_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h1v3_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h2v1_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h2v2_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h2v3_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h3v1_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h3v2_neon_8);
+QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h3v3_neon_8);
+#undef QPEL_FUNC_UW
+
+void ff_hevc_put_qpel_neon_wrapper(int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
+                                   int height, intptr_t mx, intptr_t my, int width) {
+
+    put_hevc_qpel_neon[my][mx](dst, MAX_PB_SIZE, src, srcstride, height, width);
+}
+
+void ff_hevc_put_qpel_uni_neon_wrapper(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
+                                       int height, intptr_t mx, intptr_t my, int width) {
+
+    put_hevc_qpel_uw_neon[my][mx](dst, dststride, src, srcstride, width, height, NULL, 0);
+}
+
+void ff_hevc_put_qpel_bi_neon_wrapper(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
+                                      int16_t *src2,
+                                      int height, intptr_t mx, intptr_t my, int width) {
+    put_hevc_qpel_uw_neon[my][mx](dst, dststride, src, srcstride, width, height, src2, MAX_PB_SIZE);
+}
+
 static av_cold void hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth)
 {
 #if HAVE_NEON
     if (bit_depth == 8) {
+        int x;
         c->hevc_v_loop_filter_luma   = ff_hevc_v_loop_filter_luma_neon;
         c->hevc_h_loop_filter_luma   = ff_hevc_h_loop_filter_luma_neon;
         c->hevc_v_loop_filter_chroma = ff_hevc_v_loop_filter_chroma_neon;
@@ -61,6 +161,65 @@ static av_cold void hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth)
         c->transform_add[2]          = ff_hevc_transform_add_16x16_neon_8;
         c->transform_add[3]          = ff_hevc_transform_add_32x32_neon_8;
         c->idct_4x4_luma             = ff_hevc_transform_luma_4x4_neon_8;
+        put_hevc_qpel_neon[1][0]    = ff_hevc_put_qpel_v1_neon_8;
+        put_hevc_qpel_neon[2][0]    = ff_hevc_put_qpel_v2_neon_8;
+        put_hevc_qpel_neon[3][0]    = ff_hevc_put_qpel_v3_neon_8;
+        put_hevc_qpel_neon[0][1]    = ff_hevc_put_qpel_h1_neon_8;
+        put_hevc_qpel_neon[0][2]    = ff_hevc_put_qpel_h2_neon_8;
+        put_hevc_qpel_neon[0][3]    = ff_hevc_put_qpel_h3_neon_8;
+        put_hevc_qpel_neon[1][1]    = ff_hevc_put_qpel_h1v1_neon_8;
+        put_hevc_qpel_neon[1][2]    = ff_hevc_put_qpel_h2v1_neon_8;
+        put_hevc_qpel_neon[1][3]    = ff_hevc_put_qpel_h3v1_neon_8;
+        put_hevc_qpel_neon[2][1]    = ff_hevc_put_qpel_h1v2_neon_8;
+        put_hevc_qpel_neon[2][2]    = ff_hevc_put_qpel_h2v2_neon_8;
+        put_hevc_qpel_neon[2][3]    = ff_hevc_put_qpel_h3v2_neon_8;
+        put_hevc_qpel_neon[3][1]    = ff_hevc_put_qpel_h1v3_neon_8;
+        put_hevc_qpel_neon[3][2]    = ff_hevc_put_qpel_h2v3_neon_8;
+        put_hevc_qpel_neon[3][3]    = ff_hevc_put_qpel_h3v3_neon_8;
+        put_hevc_qpel_uw_neon[1][0] = ff_hevc_put_qpel_uw_v1_neon_8;
+        put_hevc_qpel_uw_neon[2][0] = ff_hevc_put_qpel_uw_v2_neon_8;
+        put_hevc_qpel_uw_neon[3][0] = ff_hevc_put_qpel_uw_v3_neon_8;
+        put_hevc_qpel_uw_neon[0][1] = ff_hevc_put_qpel_uw_h1_neon_8;
+        put_hevc_qpel_uw_neon[0][2] = ff_hevc_put_qpel_uw_h2_neon_8;
+        put_hevc_qpel_uw_neon[0][3] = ff_hevc_put_qpel_uw_h3_neon_8;
+        put_hevc_qpel_uw_neon[1][1] = ff_hevc_put_qpel_uw_h1v1_neon_8;
+        put_hevc_qpel_uw_neon[1][2] = ff_hevc_put_qpel_uw_h2v1_neon_8;
+        put_hevc_qpel_uw_neon[1][3] = ff_hevc_put_qpel_uw_h3v1_neon_8;
+        put_hevc_qpel_uw_neon[2][1] = ff_hevc_put_qpel_uw_h1v2_neon_8;
+        put_hevc_qpel_uw_neon[2][2] = ff_hevc_put_qpel_uw_h2v2_neon_8;
+        put_hevc_qpel_uw_neon[2][3] = ff_hevc_put_qpel_uw_h3v2_neon_8;
+        put_hevc_qpel_uw_neon[3][1] = ff_hevc_put_qpel_uw_h1v3_neon_8;
+        put_hevc_qpel_uw_neon[3][2] = ff_hevc_put_qpel_uw_h2v3_neon_8;
+        put_hevc_qpel_uw_neon[3][3] = ff_hevc_put_qpel_uw_h3v3_neon_8;
+        for (x = 0; x < 10; x++) {
+            c->put_hevc_qpel[x][1][0]     = ff_hevc_put_qpel_neon_wrapper;
+            c->put_hevc_qpel[x][0][1]     = ff_hevc_put_qpel_neon_wrapper;
+            c->put_hevc_qpel[x][1][1]     = ff_hevc_put_qpel_neon_wrapper;
+            c->put_hevc_qpel_uni[x][1][0] = ff_hevc_put_qpel_uni_neon_wrapper;
+            c->put_hevc_qpel_uni[x][0][1] = ff_hevc_put_qpel_uni_neon_wrapper;
+            c->put_hevc_qpel_uni[x][1][1] = ff_hevc_put_qpel_uni_neon_wrapper;
+            c->put_hevc_qpel_bi[x][1][0]  = ff_hevc_put_qpel_bi_neon_wrapper;
+            c->put_hevc_qpel_bi[x][0][1]  = ff_hevc_put_qpel_bi_neon_wrapper;
+            c->put_hevc_qpel_bi[x][1][1]  = ff_hevc_put_qpel_bi_neon_wrapper;
+        }
+        c->put_hevc_qpel[0][0][0] = ff_hevc_put_pixels_w2_neon_8;
+        c->put_hevc_qpel[1][0][0] = ff_hevc_put_pixels_w4_neon_8;
+        c->put_hevc_qpel[2][0][0] = ff_hevc_put_pixels_w6_neon_8;
+        c->put_hevc_qpel[3][0][0] = ff_hevc_put_pixels_w8_neon_8;
+        c->put_hevc_qpel[4][0][0] = ff_hevc_put_pixels_w12_neon_8;
+        c->put_hevc_qpel[5][0][0] = ff_hevc_put_pixels_w16_neon_8;
+        c->put_hevc_qpel[6][0][0] = ff_hevc_put_pixels_w24_neon_8;
+        c->put_hevc_qpel[7][0][0] = ff_hevc_put_pixels_w32_neon_8;
+        c->put_hevc_qpel[8][0][0] = ff_hevc_put_pixels_w48_neon_8;
+        c->put_hevc_qpel[9][0][0] = ff_hevc_put_pixels_w64_neon_8;
+
+        c->put_hevc_qpel_uni[1][0][0] = ff_hevc_put_qpel_uw_pixels_w4_neon_8;
+        c->put_hevc_qpel_uni[3][0][0] = ff_hevc_put_qpel_uw_pixels_w8_neon_8;
+        c->put_hevc_qpel_uni[5][0][0] = ff_hevc_put_qpel_uw_pixels_w16_neon_8;
+        c->put_hevc_qpel_uni[6][0][0] = ff_hevc_put_qpel_uw_pixels_w24_neon_8;
+        c->put_hevc_qpel_uni[7][0][0] = ff_hevc_put_qpel_uw_pixels_w32_neon_8;
+        c->put_hevc_qpel_uni[8][0][0] = ff_hevc_put_qpel_uw_pixels_w48_neon_8;
+        c->put_hevc_qpel_uni[9][0][0] = ff_hevc_put_qpel_uw_pixels_w64_neon_8;
     }
 #endif // HAVE_NEON
 }
diff --git a/libavcodec/arm/hevcdsp_qpel_neon.S b/libavcodec/arm/hevcdsp_qpel_neon.S
new file mode 100644
index 0000000000..86f92cf75a
--- /dev/null
+++ b/libavcodec/arm/hevcdsp_qpel_neon.S
@@ -0,0 +1,999 @@
+/*
+ * Copyright (c) 2014 - 2015 Seppo Tomperi <seppo.tomperi@vtt.fi>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+#include "neon.S"
+
+#define MAX_PB_SIZE #64
+
+.macro regshuffle_d8
+        vmov d16, d17
+        vmov d17, d18
+        vmov d18, d19
+        vmov d19, d20
+        vmov d20, d21
+        vmov d21, d22
+        vmov d22, d23
+.endm
+
+.macro regshuffle_q8
+        vmov q0, q1
+        vmov q1, q2
+        vmov q2, q3
+        vmov q3, q4
+        vmov q4, q5
+        vmov q5, q6
+        vmov q6, q7
+.endm
+
+.macro vextin8
+        pld       [r2]
+        vld1.8    {q11}, [r2], r3
+        vext.8    d16, d22, d23, #1
+        vext.8    d17, d22, d23, #2
+        vext.8    d18, d22, d23, #3
+        vext.8    d19, d22, d23, #4
+        vext.8    d20, d22, d23, #5
+        vext.8    d21, d22, d23, #6
+        vext.8    d22, d22, d23, #7
+.endm
+
+.macro loadin8
+        pld       [r2]
+        vld1.8    {d16}, [r2], r3
+        pld       [r2]
+        vld1.8    {d17}, [r2], r3
+        pld       [r2]
+        vld1.8    {d18}, [r2], r3
+        pld       [r2]
+        vld1.8    {d19}, [r2], r3
+        pld       [r2]
+        vld1.8    {d20}, [r2], r3
+        pld       [r2]
+        vld1.8    {d21}, [r2], r3
+        pld       [r2]
+        vld1.8    {d22}, [r2], r3
+        pld       [r2]
+        vld1.8    {d23}, [r2], r3
+.endm
+
+.macro qpel_filter_1_32b
+        vmov.i16   d16, #58
+        vmov.i16   d17, #10
+        vmull.s16  q9, d6, d16    // 58 * d0
+        vmull.s16  q10, d7, d16   // 58 * d1
+        vmov.i16   d16, #17
+        vmull.s16  q11, d4, d17   // 10 * c0
+        vmull.s16  q12, d5, d17   // 10 * c1
+        vmov.i16   d17, #5
+        vmull.s16  q13, d8, d16   // 17 * e0
+        vmull.s16  q14, d9, d16   // 17 * e1
+        vmull.s16  q15, d10, d17  // 5 * f0
+        vmull.s16  q8, d11, d17   // 5 * f1
+        vsub.s32   q9, q11        // 58 * d0 - 10 * c0
+        vsub.s32   q10, q12       // 58 * d1 - 10 * c1
+        vshll.s16  q11, d2, #2    // 4 * b0
+        vshll.s16  q12, d3, #2    // 4 * b1
+        vadd.s32   q9, q13        // 58 * d0 - 10 * c0 + 17 * e0
+        vadd.s32   q10, q14       // 58 * d1 - 10 * c1 + 17 * e1
+        vsubl.s16  q13, d12, d0   // g0 - a0
+        vsubl.s16  q14, d13, d1   // g1 - a1
+        vadd.s32   q9, q11        // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0
+        vadd.s32   q10, q12       // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1
+        vsub.s32   q13, q15       // g0 - a0 - 5 * f0
+        vsub.s32   q14, q8        // g1 - a1 - 5 * f1
+        vadd.s32   q9, q13        // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0 + g0 - a0 - 5 * f0
+        vadd.s32   q10, q14       // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1 + g1 - a1 - 5 * f1
+        vqshrn.s32 d16, q9, #6
+        vqshrn.s32 d17, q10, #6
+.endm
+
+// input q0 - q7
+// output q8
+.macro qpel_filter_2_32b
+        vmov.i32   q8, #11
+        vaddl.s16  q9, d6, d8     // d0 + e0
+        vaddl.s16  q10, d7, d9    // d1 + e1
+        vaddl.s16  q11, d4, d10   // c0 + f0
+        vaddl.s16  q12, d5, d11   // c1 + f1
+        vmul.s32   q11, q8        // 11 * (c0 + f0)
+        vmul.s32   q12, q8        // 11 * (c1 + f1)
+        vmov.i32   q8, #40
+        vaddl.s16  q15, d2, d12   // b0 + g0
+        vmul.s32   q9, q8         // 40 * (d0 + e0)
+        vmul.s32   q10, q8        // 40 * (d1 + e1)
+        vaddl.s16  q8, d3, d13    // b1 + g1
+        vaddl.s16  q13, d0, d14   // a0 + h0
+        vaddl.s16  q14, d1, d15   // a1 + h1
+        vshl.s32   q15, #2        // 4*(b0+g0)
+        vshl.s32   q8, #2         // 4*(b1+g1)
+        vadd.s32   q11, q13       // 11 * (c0 + f0) + a0 + h0
+        vadd.s32   q12, q14       // 11 * (c1 + f1) + a1 + h1
+        vadd.s32   q9, q15        // 40 * (d0 + e0) + 4*(b0+g0)
+        vadd.s32   q10, q8        // 40 * (d1 + e1) + 4*(b1+g1)
+        vsub.s32   q9, q11        // 40 * (d0 + e0) + 4*(b0+g0) - (11 * (c0 + f0) + a0 + h0)
+        vsub.s32   q10, q12       // 40 * (d1 + e1) + 4*(b1+g1) - (11 * (c1 + f1) + a1 + h1)
+        vqshrn.s32 d16, q9, #6
+        vqshrn.s32 d17, q10, #6
+.endm
+
+.macro qpel_filter_3_32b
+        vmov.i16   d16, #58
+        vmov.i16   d17, #10
+        vmull.s16  q9, d8, d16    // 58 * d0
+        vmull.s16  q10, d9, d16   // 58 * d1
+        vmov.i16   d16, #17
+        vmull.s16  q11, d10, d17  // 10 * c0
+        vmull.s16  q12, d11, d17  // 10 * c1
+        vmov.i16   d17, #5
+        vmull.s16  q13, d6, d16   // 17 * e0
+        vmull.s16  q14, d7, d16   // 17 * e1
+        vmull.s16  q15, d4, d17   // 5 * f0
+        vmull.s16  q8, d5, d17    // 5 * f1
+        vsub.s32   q9, q11        // 58 * d0 - 10 * c0
+        vsub.s32   q10, q12       // 58 * d1 - 10 * c1
+        vshll.s16  q11, d12, #2   // 4 * b0
+        vshll.s16  q12, d13, #2   // 4 * b1
+        vadd.s32   q9, q13        // 58 * d0 - 10 * c0 + 17 * e0
+        vadd.s32   q10, q14       // 58 * d1 - 10 * c1 + 17 * e1
+        vsubl.s16  q13, d2, d14   // g0 - a0
+        vsubl.s16  q14, d3, d15   // g1 - a1
+        vadd.s32   q9, q11        // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0
+        vadd.s32   q10, q12       // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1
+        vsub.s32   q13, q15       // g0 - a0 - 5 * f0
+        vsub.s32   q14, q8        // g1 - a1 - 5 * f1
+        vadd.s32   q9, q13        // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0 + g0 - a0 - 5 * f0
+        vadd.s32   q10, q14       // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1 + g1 - a1 - 5 * f1
+        vqshrn.s32 d16, q9, #6
+        vqshrn.s32 d17, q10, #6
+.endm
+
+.macro qpel_filter_1 out=q7
+        vmov.u8    d24, #58
+        vmov.u8    d25, #10
+        vshll.u8   q13, d20, #4   // 16*e
+        vshll.u8   q14, d21, #2   // 4*f
+        vmull.u8   \out, d19, d24 // 58*d
+        vaddw.u8   q13, q13, d20  // 17*e
+        vmull.u8   q15, d18, d25  // 10*c
+        vaddw.u8   q14, q14, d21  // 5*f
+        vsubl.u8   q12, d22, d16  // g - a
+        vadd.u16   \out, q13      // 58d + 17e
+        vshll.u8   q13, d17, #2   // 4*b
+        vadd.u16   q15, q14       // 10*c + 5*f
+        vadd.s16   q13, q12       // - a + 4*b + g
+        vsub.s16   \out, q15      // -10*c + 58*d + 17*e -5*f
+        vadd.s16   \out, q13      // -a + 4*b -10*c + 58*d + 17*e -5*f
+.endm
+
+.macro qpel_filter_2 out=q7
+        vmov.i16   q12, #10
+        vmov.i16   q14, #11
+        vaddl.u8   q13, d19, d20  // d + e
+        vaddl.u8   q15, d18, d21  // c + f
+        vmul.u16   q13, q12       // 10 * (d+e)
+        vmul.u16   q15, q14       // 11 * ( c + f)
+        vaddl.u8   \out, d17, d22 // b + g
+        vaddl.u8   q12, d16, d23  // a + h
+        vadd.u16   \out, q13      // b + 10 * (d + e) + g
+        vadd.s16   q12, q15
+        vshl.u16   \out, #2       // 4 * (b + 10 * (d + e) + g)
+        vsub.s16   \out, q12
+.endm
+
+.macro qpel_filter_3 out=q7
+        vmov.u8    d24, #58
+        vmov.u8    d25, #10
+        vshll.u8   q13, d19, #4   // 16*e
+        vshll.u8   q14, d18, #2   // 4*f
+        vmull.u8   \out, d20, d24 // 58*d
+        vaddw.u8   q13, q13, d19  // 17*e
+        vmull.u8   q15, d21, d25  // 10*c
+        vaddw.u8   q14, q14, d18  // 5*f
+        vsubl.u8   q12, d17, d23  // g - a
+        vadd.u16   \out, q13      // 58d + 17e
+        vshll.u8   q13, d22, #2   // 4*b
+        vadd.u16   q15, q14       // 10*c + 5*f
+        vadd.s16   q13, q12       // - a + 4*b + g
+        vsub.s16   \out, q15      // -10*c + 58*d + 17*e -5*f
+        vadd.s16   \out, q13      // -a + 4*b -10*c + 58*d + 17*e -5*f
+.endm
+
+.macro hevc_put_qpel_vX_neon_8 filter
+        push     {r4, r5, r6, r7}
+        ldr      r4, [sp, #16]  // height
+        ldr      r5, [sp, #20]  // width
+        vpush    {d8-d15}
+        sub      r2, r2, r3, lsl #1
+        sub      r2, r3
+        mov      r12, r4
+        mov      r6, r0
+        mov      r7, r2
+        lsl      r1, #1
+0:      loadin8
+        cmp      r5, #4
+        beq      4f
+8:      subs     r4, #1
+        \filter
+        vst1.16  {q7}, [r0], r1
+        regshuffle_d8
+        vld1.8   {d23}, [r2], r3
+        bne      8b
+        subs     r5, #8
+        beq      99f
+        mov      r4, r12
+        add      r6, #16
+        mov      r0, r6
+        add      r7, #8
+        mov      r2, r7
+        b        0b
+4:      subs     r4, #1
+        \filter
+        vst1.16  d14, [r0], r1
+        regshuffle_d8
+        vld1.32  {d23[0]}, [r2], r3
+        bne      4b
+99:     vpop     {d8-d15}
+        pop      {r4, r5, r6, r7}
+        bx       lr
+.endm
+
+.macro hevc_put_qpel_uw_vX_neon_8 filter
+        push     {r4-r10}
+        ldr      r5, [sp, #28]  // width
+        ldr      r4, [sp, #32]  // height
+        ldr      r8, [sp, #36]  // src2
+        ldr      r9, [sp, #40]  // src2stride
+        vpush    {d8-d15}
+        sub      r2, r2, r3, lsl #1
+        sub      r2, r3
+        mov      r12, r4
+        mov      r6, r0
+        mov      r7, r2
+        cmp      r8, #0
+        bne      .Lbi\@
+0:      loadin8
+        cmp      r5, #4
+        beq      4f
+8:      subs     r4, #1
+        \filter
+        vqrshrun.s16 d0, q7, #6
+        vst1.8   d0, [r0], r1
+        regshuffle_d8
+        vld1.8   {d23}, [r2], r3
+        bne      8b
+        subs     r5, #8
+        beq      99f
+        mov      r4, r12
+        add      r6, #8
+        mov      r0, r6
+        add      r7, #8
+        mov      r2, r7
+        b        0b
+4:      subs     r4, #1
+        \filter
+        vqrshrun.s16 d0, q7, #6
+        vst1.32  d0[0], [r0], r1
+        regshuffle_d8
+        vld1.32  {d23[0]}, [r2], r3
+        bne      4b
+        b        99f
+.Lbi\@: lsl      r9, #1
+        mov      r10, r8
+0:      loadin8
+        cmp      r5, #4
+        beq      4f
+8:      subs     r4, #1
+        \filter
+        vld1.16  {q0}, [r8], r9
+        vqadd.s16 q0, q7
+        vqrshrun.s16 d0, q0, #7
+        vst1.8   d0, [r0], r1
+        regshuffle_d8
+        vld1.8   {d23}, [r2], r3
+        bne      8b
+        subs     r5, #8
+        beq      99f
+        mov      r4, r12
+        add      r6, #8
+        mov      r0, r6
+        add      r10, #16
+        mov      r8, r10
+        add      r7, #8
+        mov      r2, r7
+        b        0b
+4:      subs     r4, #1
+        \filter
+        vld1.16  d0, [r8], r9
+        vqadd.s16 d0, d14
+        vqrshrun.s16 d0, q0, #7
+        vst1.32  d0[0], [r0], r1
+        regshuffle_d8
+        vld1.32  {d23[0]}, [r2], r3
+        bne      4b
+99:     vpop     {d8-d15}
+        pop      {r4-r10}
+        bx       lr
+.endm
+
+function ff_hevc_put_qpel_v1_neon_8, export=1
+        hevc_put_qpel_vX_neon_8 qpel_filter_1
+endfunc
+
+function ff_hevc_put_qpel_v2_neon_8, export=1
+        hevc_put_qpel_vX_neon_8 qpel_filter_2
+endfunc
+
+function ff_hevc_put_qpel_v3_neon_8, export=1
+        hevc_put_qpel_vX_neon_8 qpel_filter_3
+endfunc
+
+
+function ff_hevc_put_qpel_uw_v1_neon_8, export=1
+        hevc_put_qpel_uw_vX_neon_8 qpel_filter_1
+endfunc
+
+function ff_hevc_put_qpel_uw_v2_neon_8, export=1
+        hevc_put_qpel_uw_vX_neon_8 qpel_filter_2
+endfunc
+
+function ff_hevc_put_qpel_uw_v3_neon_8, export=1
+        hevc_put_qpel_uw_vX_neon_8 qpel_filter_3
+endfunc
+
+.macro hevc_put_qpel_hX_neon_8 filter
+        push     {r4, r5, r6, r7}
+        ldr      r4, [sp, #16]  // height
+        ldr      r5, [sp, #20]  // width
+
+        vpush    {d8-d15}
+        sub      r2, #4
+        lsl      r1, #1
+        mov      r12, r4
+        mov      r6, r0
+        mov      r7, r2
+        cmp      r5, #4
+        beq      4f
+8:      subs     r4, #1
+        vextin8
+        \filter
+        vst1.16  {q7}, [r0], r1
+        bne      8b
+        subs     r5, #8
+        beq      99f
+        mov      r4, r12
+        add      r6, #16
+        mov      r0, r6
+        add      r7, #8
+        mov      r2, r7
+        cmp      r5, #4
+        bne      8b
+4:      subs     r4, #1
+        vextin8
+        \filter
+        vst1.16  d14, [r0], r1
+        bne      4b
+99:     vpop     {d8-d15}
+        pop      {r4, r5, r6, r7}
+        bx       lr
+.endm
+
+.macro hevc_put_qpel_uw_hX_neon_8 filter
+        push     {r4-r10}
+        ldr      r5, [sp, #28]  // width
+        ldr      r4, [sp, #32]  // height
+        ldr      r8, [sp, #36]  // src2
+        ldr      r9, [sp, #40]  // src2stride
+        vpush    {d8-d15}
+        sub      r2, #4
+        mov      r12, r4
+        mov      r6, r0
+        mov      r7, r2
+        cmp      r8, #0
+        bne      .Lbi\@
+        cmp      r5, #4
+        beq      4f
+8:      subs     r4, #1
+        vextin8
+        \filter
+        vqrshrun.s16 d0, q7, #6
+        vst1.8   d0, [r0], r1
+        bne      8b
+        subs     r5, #8
+        beq      99f
+        mov      r4, r12
+        add      r6, #8
+        mov      r0, r6
+        add      r7, #8
+        mov      r2, r7
+        cmp      r5, #4
+        bne      8b
+4:      subs     r4, #1
+        vextin8
+        \filter
+        vqrshrun.s16 d0, q7, #6
+        vst1.32  d0[0], [r0], r1
+        bne      4b
+        b        99f
+.Lbi\@:
+        lsl      r9, #1
+        cmp      r5, #4
+        beq      4f
+        mov      r10, r8
+8:      subs     r4, #1
+        vextin8
+        \filter
+        vld1.16  {q0}, [r8], r9
+        vqadd.s16 q0, q7
+        vqrshrun.s16 d0, q0, #7
+        vst1.8   d0, [r0], r1
+        bne      8b
+        subs     r5, #8
+        beq      99f
+        mov      r4, r12
+        add      r6, #8
+        add      r10, #16
+        mov      r8, r10
+        mov      r0, r6
+        add      r7, #8
+        mov      r2, r7
+        cmp      r5, #4
+        bne      8b
+4:      subs     r4, #1
+        vextin8
+        \filter
+        vld1.16  d0, [r8], r9
+        vqadd.s16 d0, d14
+        vqrshrun.s16 d0, q0, #7
+        vst1.32  d0[0], [r0], r1
+        bne      4b
+99:     vpop     {d8-d15}
+        pop      {r4-r10}
+        bx       lr
+.endm
+
+function ff_hevc_put_qpel_h1_neon_8, export=1
+        hevc_put_qpel_hX_neon_8 qpel_filter_1
+endfunc
+
+function ff_hevc_put_qpel_h2_neon_8, export=1
+        hevc_put_qpel_hX_neon_8 qpel_filter_2
+endfunc
+
+function ff_hevc_put_qpel_h3_neon_8, export=1
+        hevc_put_qpel_hX_neon_8 qpel_filter_3
+endfunc
+
+
+function ff_hevc_put_qpel_uw_h1_neon_8, export=1
+        hevc_put_qpel_uw_hX_neon_8 qpel_filter_1
+endfunc
+
+function ff_hevc_put_qpel_uw_h2_neon_8, export=1
+        hevc_put_qpel_uw_hX_neon_8 qpel_filter_2
+endfunc
+
+function ff_hevc_put_qpel_uw_h3_neon_8, export=1
+        hevc_put_qpel_uw_hX_neon_8 qpel_filter_3
+endfunc
+
+.macro hevc_put_qpel_hXvY_neon_8 filterh filterv
+        push     {r4, r5, r6, r7}
+        ldr      r4, [sp, #16]  // height
+        ldr      r5, [sp, #20]  // width
+
+        vpush    {d8-d15}
+        sub      r2, #4
+        sub      r2, r2, r3, lsl #1
+        sub      r2, r3         // extra_before 3
+        lsl      r1, #1
+        mov      r12, r4
+        mov      r6, r0
+        mov      r7, r2
+0:      vextin8
+        \filterh q0
+        vextin8
+        \filterh q1
+        vextin8
+        \filterh q2
+        vextin8
+        \filterh q3
+        vextin8
+        \filterh q4
+        vextin8
+        \filterh q5
+        vextin8
+        \filterh q6
+        vextin8
+        \filterh q7
+        cmp      r5, #4
+        beq      4f
+8:      subs     r4, #1
+        \filterv
+        vst1.16  {q8}, [r0], r1
+        regshuffle_q8
+        vextin8
+        \filterh q7
+        bne      8b
+        subs     r5, #8
+        beq      99f
+        mov      r4, r12
+        add      r6, #16
+        mov      r0, r6
+        add      r7, #8
+        mov      r2, r7
+        b        0b
+4:      subs     r4, #1
+        \filterv
+        vst1.16  d16, [r0], r1
+        regshuffle_q8
+        vextin8
+        \filterh q7
+        bne      4b
+99:     vpop     {d8-d15}
+        pop      {r4, r5, r6, r7}
+        bx       lr
+.endm
+
+.macro hevc_put_qpel_uw_hXvY_neon_8 filterh filterv
+        push     {r4-r10}
+        ldr      r5, [sp, #28]  // width
+        ldr      r4, [sp, #32]  // height
+        ldr      r8, [sp, #36]  // src2
+        ldr      r9, [sp, #40]  // src2stride
+        vpush    {d8-d15}
+        sub      r2, #4
+        sub      r2, r2, r3, lsl #1
+        sub      r2, r3         // extra_before 3
+        mov      r12, r4
+        mov      r6, r0
+        mov      r7, r2
+        cmp      r8, #0
+        bne      .Lbi\@
+0:      vextin8
+        \filterh q0
+        vextin8
+        \filterh q1
+        vextin8
+        \filterh q2
+        vextin8
+        \filterh q3
+        vextin8
+        \filterh q4
+        vextin8
+        \filterh q5
+        vextin8
+        \filterh q6
+        vextin8
+        \filterh q7
+        cmp      r5, #4
+        beq      4f
+8:      subs     r4, #1
+        \filterv
+        vqrshrun.s16 d0, q8, #6
+        vst1.8   d0, [r0], r1
+        regshuffle_q8
+        vextin8
+        \filterh q7
+        bne      8b
+        subs     r5, #8
+        beq      99f
+        mov      r4, r12
+        add      r6, #8
+        mov      r0, r6
+        add      r7, #8
+        mov      r2, r7
+        b        0b
+4:      subs     r4, #1
+        \filterv
+        vqrshrun.s16 d0, q8, #6
+        vst1.32  d0[0], [r0], r1
+        regshuffle_q8
+        vextin8
+        \filterh q7
+        bne      4b
+        b        99f
+.Lbi\@: lsl      r9, #1
+        mov      r10, r8
+0:      vextin8
+        \filterh q0
+        vextin8
+        \filterh q1
+        vextin8
+        \filterh q2
+        vextin8
+        \filterh q3
+        vextin8
+        \filterh q4
+        vextin8
+        \filterh q5
+        vextin8
+        \filterh q6
+        vextin8
+        \filterh q7
+        cmp      r5, #4
+        beq      4f
+8:      subs     r4, #1
+        \filterv
+        vld1.16  {q0}, [r8], r9
+        vqadd.s16 q0, q8
+        vqrshrun.s16 d0, q0, #7
+        vst1.8   d0, [r0], r1
+        regshuffle_q8
+        vextin8
+        \filterh q7
+        bne      8b
+        subs     r5, #8
+        beq      99f
+        mov      r4, r12
+        add      r6, #8
+        mov      r0, r6
+        add      r10, #16
+        mov      r8, r10
+        add      r7, #8
+        mov      r2, r7
+        b        0b
+4:      subs     r4, #1
+        \filterv
+        vld1.16  d0, [r8], r9
+        vqadd.s16 d0, d16
+        vqrshrun.s16 d0, q0, #7
+        vst1.32  d0[0], [r0], r1
+        regshuffle_q8
+        vextin8
+        \filterh q7
+        bne      4b
+99:     vpop     {d8-d15}
+        pop      {r4-r10}
+        bx       lr
+.endm
+
+
+function ff_hevc_put_qpel_h1v1_neon_8, export=1
+        hevc_put_qpel_hXvY_neon_8 qpel_filter_1 qpel_filter_1_32b
+endfunc
+
+function ff_hevc_put_qpel_h2v1_neon_8, export=1
+        hevc_put_qpel_hXvY_neon_8 qpel_filter_2 qpel_filter_1_32b
+endfunc
+
+function ff_hevc_put_qpel_h3v1_neon_8, export=1
+        hevc_put_qpel_hXvY_neon_8 qpel_filter_3 qpel_filter_1_32b
+endfunc
+
+function ff_hevc_put_qpel_h1v2_neon_8, export=1
+        hevc_put_qpel_hXvY_neon_8 qpel_filter_1 qpel_filter_2_32b
+endfunc
+
+function ff_hevc_put_qpel_h2v2_neon_8, export=1
+        hevc_put_qpel_hXvY_neon_8 qpel_filter_2 qpel_filter_2_32b
+endfunc
+
+function ff_hevc_put_qpel_h3v2_neon_8, export=1
+        hevc_put_qpel_hXvY_neon_8 qpel_filter_3 qpel_filter_2_32b
+endfunc
+
+function ff_hevc_put_qpel_h1v3_neon_8, export=1
+        hevc_put_qpel_hXvY_neon_8 qpel_filter_1 qpel_filter_3_32b
+endfunc
+
+function ff_hevc_put_qpel_h2v3_neon_8, export=1
+        hevc_put_qpel_hXvY_neon_8 qpel_filter_2 qpel_filter_3_32b
+endfunc
+
+function ff_hevc_put_qpel_h3v3_neon_8, export=1
+        hevc_put_qpel_hXvY_neon_8 qpel_filter_3 qpel_filter_3_32b
+endfunc
+
+
+function ff_hevc_put_qpel_uw_h1v1_neon_8, export=1
+        hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_1 qpel_filter_1_32b
+endfunc
+
+function ff_hevc_put_qpel_uw_h2v1_neon_8, export=1
+        hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_2 qpel_filter_1_32b
+endfunc
+
+function ff_hevc_put_qpel_uw_h3v1_neon_8, export=1
+        hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_3 qpel_filter_1_32b
+endfunc
+
+function ff_hevc_put_qpel_uw_h1v2_neon_8, export=1
+        hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_1 qpel_filter_2_32b
+endfunc
+
+function ff_hevc_put_qpel_uw_h2v2_neon_8, export=1
+        hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_2 qpel_filter_2_32b
+endfunc
+
+function ff_hevc_put_qpel_uw_h3v2_neon_8, export=1
+        hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_3 qpel_filter_2_32b
+endfunc
+
+function ff_hevc_put_qpel_uw_h1v3_neon_8, export=1
+        hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_1 qpel_filter_3_32b
+endfunc
+
+function ff_hevc_put_qpel_uw_h2v3_neon_8, export=1
+        hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_2 qpel_filter_3_32b
+endfunc
+
+function ff_hevc_put_qpel_uw_h3v3_neon_8, export=1
+        hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_3 qpel_filter_3_32b
+endfunc
+
+.macro init_put_pixels
+        pld      [r1]
+        pld      [r1, r2]
+        mov      r12, MAX_PB_SIZE
+        lsl      r12, #1
+.endm
+
+function ff_hevc_put_pixels_w2_neon_8, export=1
+        init_put_pixels
+        vmov.u8   d5, #255
+        vshr.u64  d5, #32
+0:      subs      r3, #1
+        vld1.32   {d0[0]}, [r1], r2
+        pld       [r1]
+        vld1.32   d6, [r0]
+        vshll.u8  q0, d0, #6
+        vbit      d6, d0, d5
+        vst1.32   d6, [r0], r12
+        bne       0b
+        bx        lr
+endfunc
+
+function ff_hevc_put_pixels_w4_neon_8, export=1
+        init_put_pixels
+0:      subs      r3, #2
+        vld1.32   {d0[0]}, [r1], r2
+        vld1.32   {d0[1]}, [r1], r2
+        pld       [r1]
+        pld       [r1, r2]
+        vshll.u8  q0, d0, #6
+        vst1.64   {d0}, [r0], r12
+        vst1.64   {d1}, [r0], r12
+        bne       0b
+        bx        lr
+endfunc
+
+function ff_hevc_put_pixels_w6_neon_8, export=1
+        init_put_pixels
+        vmov.u8   q10, #255
+        vshr.u64  d21, #32
+0:      subs      r3, #1
+        vld1.16   {d0}, [r1], r2
+        pld       [r1]
+        vshll.u8  q0, d0, #6
+        vld1.8    {q12}, [r0]
+        vbit      q12, q0, q10
+        vst1.8    {q12}, [r0], r12
+        bne       0b
+        bx        lr
+endfunc
+
+function ff_hevc_put_pixels_w8_neon_8, export=1
+        init_put_pixels
+0:      subs      r3, #2
+        vld1.8    {d0}, [r1], r2
+        vld1.8    {d2}, [r1], r2
+        pld       [r1]
+        pld       [r1, r2]
+        vshll.u8  q0, d0, #6
+        vshll.u8  q1, d2, #6
+        vst1.16   {q0}, [r0], r12
+        vst1.16   {q1}, [r0], r12
+        bne       0b
+        bx        lr
+endfunc
+
+function ff_hevc_put_pixels_w12_neon_8, export=1
+        init_put_pixels
+0:      subs      r3, #2
+        vld1.64   {d0}, [r1]
+        add       r1, #8
+        vld1.32   {d1[0]}, [r1], r2
+        sub       r1, #8
+        vld1.64   {d2}, [r1]
+        add       r1, #8
+        vld1.32   {d1[1]}, [r1], r2
+        sub       r1, #8
+        pld       [r1]
+        pld       [r1, r2]
+        vshll.u8  q8, d0, #6
+        vshll.u8  q9, d1, #6
+        vshll.u8  q10, d2, #6
+        vmov      d22, d19
+        vst1.64   {d16, d17, d18}, [r0], r12
+        vst1.64   {d20, d21, d22}, [r0], r12
+        bne       0b
+        bx        lr
+endfunc
+
+function ff_hevc_put_pixels_w16_neon_8, export=1
+        init_put_pixels
+0:      subs      r3, #2
+        vld1.8    {q0}, [r1], r2
+        vld1.8    {q1}, [r1], r2
+        pld       [r1]
+        pld       [r1, r2]
+        vshll.u8  q8, d0, #6
+        vshll.u8  q9, d1, #6
+        vshll.u8  q10, d2, #6
+        vshll.u8  q11, d3, #6
+        vst1.8    {q8, q9}, [r0], r12
+        vst1.8    {q10, q11}, [r0], r12
+        bne       0b
+        bx        lr
+endfunc
+
+function ff_hevc_put_pixels_w24_neon_8, export=1
+        init_put_pixels
+0:      subs      r3, #1
+        vld1.8    {d0, d1, d2}, [r1], r2
+        pld       [r1]
+        vshll.u8  q10, d0, #6
+        vshll.u8  q11, d1, #6
+        vshll.u8  q12, d2, #6
+        vstm      r0, {q10, q11, q12}
+        add       r0, r12
+        bne       0b
+        bx        lr
+endfunc
+
+function ff_hevc_put_pixels_w32_neon_8, export=1
+        init_put_pixels
+0:      subs      r3, #1
+        vld1.8    {q0, q1}, [r1], r2
+        pld       [r1]
+        vshll.u8  q8, d0, #6
+        vshll.u8  q9, d1, #6
+        vshll.u8  q10, d2, #6
+        vshll.u8  q11, d3, #6
+        vstm      r0, {q8, q9, q10, q11}
+        add       r0, r12
+        bne       0b
+        bx        lr
+endfunc
+
+function ff_hevc_put_pixels_w48_neon_8, export=1
+        init_put_pixels
+0:      subs      r3, #1
+        vld1.8    {q0, q1}, [r1]
+        add       r1, #32
+        vld1.8    {q2}, [r1], r2
+        sub       r1, #32
+        pld       [r1]
+        vshll.u8  q8, d0, #6
+        vshll.u8  q9, d1, #6
+        vshll.u8  q10, d2, #6
+        vshll.u8  q11, d3, #6
+        vshll.u8  q12, d4, #6
+        vshll.u8  q13, d5, #6
+        vstm      r0, {q8, q9, q10, q11, q12, q13}
+        add       r0, r12
+        bne       0b
+        bx        lr
+endfunc
+
+function ff_hevc_put_pixels_w64_neon_8, export=1
+        init_put_pixels
+0:      subs      r3, #1
+        vld1.8    {q0, q1}, [r1]
+        add       r1, #32
+        vld1.8    {q2, q3}, [r1], r2
+        sub       r1, #32
+        pld       [r1]
+        vshll.u8  q8, d0, #6
+        vshll.u8  q9, d1, #6
+        vshll.u8  q10, d2, #6
+        vshll.u8  q11, d3, #6
+        vshll.u8  q12, d4, #6
+        vshll.u8  q13, d5, #6
+        vshll.u8  q14, d6, #6
+        vshll.u8  q15, d7, #6
+        vstm      r0, {q8, q9, q10, q11, q12, q13, q14, q15}
+        add       r0, r12
+        bne       0b
+        bx        lr
+endfunc
+
+function ff_hevc_put_qpel_uw_pixels_neon_8, export=1
+        push      {r4-r9}
+        ldr       r5, [sp, #24] // width
+        ldr       r4, [sp, #28] // height
+        ldr       r8, [sp, #32] // src2
+        ldr       r9, [sp, #36] // src2stride
+        vpush     {d8-d15}
+        cmp       r8, #0
+        bne       2f
+1:      subs      r4, #1
+        vld1.8    {d0}, [r2], r3
+        vst1.8    d0, [r0], r1
+        bne       1b
+        vpop      {d8-d15}
+        pop       {r4-r9}
+        bx        lr
+2:      subs      r4, #1
+        vld1.8    {d0}, [r2], r3
+        vld1.16   {q1}, [r8], r9
+        vshll.u8  q0, d0, #6
+        vqadd.s16 q0, q1
+        vqrshrun.s16 d0, q0, #7
+        vst1.8    d0, [r0], r1
+        bne       2b
+        vpop      {d8-d15}
+        pop       {r4-r9}
+        bx        lr
+endfunc
+
+.macro put_qpel_uw_pixels width, regs, regs2, regs3, regs4
+function ff_hevc_put_qpel_uw_pixels_w\width\()_neon_8, export=1
+        ldr       r12, [sp]    // height
+1:      subs      r12, #4
+        vld1.32   {\regs}, [r2], r3
+        vld1.32   {\regs2}, [r2], r3
+        vld1.32   {\regs3}, [r2], r3
+        vld1.32   {\regs4}, [r2], r3
+        vst1.32   {\regs}, [r0], r1
+        vst1.32   {\regs2}, [r0], r1
+        vst1.32   {\regs3}, [r0], r1
+        vst1.32   {\regs4}, [r0], r1
+        bne       1b
+        bx        lr
+endfunc
+.endm
+
+.macro put_qpel_uw_pixels_m width, regs, regs2, regs3, regs4
+function ff_hevc_put_qpel_uw_pixels_w\width\()_neon_8, export=1
+        push      {r4-r5}
+        ldr       r12, [sp, #8] // height
+1:      subs      r12, #2
+        mov       r4, r2
+        vld1.32   {\regs}, [r2]!
+        vld1.32   {\regs2}, [r2]
+        add       r2, r4, r3
+        mov       r4, r2
+        vld1.32   {\regs3}, [r2]!
+        vld1.32   {\regs4}, [r2]
+        add       r2, r4, r3
+        mov       r5, r0
+        vst1.32   {\regs}, [r0]!
+        vst1.32   {\regs2}, [r0]
+        add       r0, r5, r1
+        mov       r5, r0
+        vst1.32   {\regs3}, [r0]!
+        vst1.32   {\regs4}, [r0]
+        add       r0, r5, r1
+        bne       1b
+        pop       {r4-r5}
+        bx        lr
+endfunc
+.endm
+
+put_qpel_uw_pixels    4, d0[0], d0[1], d1[0], d1[1]
+put_qpel_uw_pixels    8, d0, d1, d2, d3
+put_qpel_uw_pixels_m 12, d0, d1[0], d2, d3[0]
+put_qpel_uw_pixels   16, q0, q1, q2, q3
+put_qpel_uw_pixels   24, d0-d2, d3-d5, d16-d18, d19-d21
+put_qpel_uw_pixels   32, q0-q1, q2-q3, q8-q9, q10-q11
+put_qpel_uw_pixels_m 48, q0-q1, q2, q8-q9, q10
+put_qpel_uw_pixels_m 64, q0-q1, q2-q3, q8-q9, q10-q11
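
For reference, the accumulations spelled out in the inline comments of the filter macros are HEVC's luma interpolation kernels; written out for eight consecutive input samples a..h (a summary derived from the comments above, not part of the patch):

    qpel_filter_1:  out = -a + 4*b - 10*c + 58*d + 17*e - 5*f + g         // taps {-1, 4, -10, 58, 17, -5, 1}
    qpel_filter_2:  out = -a + 4*b - 11*c + 40*d + 40*e - 11*f + 4*g - h  // taps {-1, 4, -11, 40, 40, -11, 4, -1}
    qpel_filter_3:  mirror image of qpel_filter_1

The _32b variants apply the same kernels to 16-bit intermediates for the combined horizontal-plus-vertical (hXvY) cases, narrowing the 32-bit accumulators with vqshrn by 6 at the end.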