diff options
author | Christophe Gisquet <christophe.gisquet@gmail.com> | 2014-05-30 16:28:49 +0200 |
---|---|---|
committer | Michael Niedermayer <michaelni@gmx.at> | 2014-06-05 20:31:59 +0200 |
commit | ccff45a0d3c981edc97078885e2a630e0436ce31 (patch) | |
tree | 978070bbff082597623c0c954e9890b6839e794d /libavcodec/arm/lossless_audiodsp_neon.S | |
parent | 151f88d507f18c0aae011f9e88f40c508c11640f (diff) | |
download | ffmpeg-ccff45a0d3c981edc97078885e2a630e0436ce31.tar.gz |
apedsp: move to llauddsp
APE is not the sole codec using scalarproduct_and_madd_int16.
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec/arm/lossless_audiodsp_neon.S')
-rw-r--r-- | libavcodec/arm/lossless_audiodsp_neon.S | 62 |
1 file changed, 62 insertions, 0 deletions
diff --git a/libavcodec/arm/lossless_audiodsp_neon.S b/libavcodec/arm/lossless_audiodsp_neon.S
new file mode 100644
index 0000000000..ba7c45fcef
--- /dev/null
+++ b/libavcodec/arm/lossless_audiodsp_neon.S
@@ -0,0 +1,62 @@
+/*
+ * ARM NEON optimised integer operations
+ * Copyright (c) 2009 Kostya Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+@ scalarproduct_and_madd_int16(/*aligned*/v0,v1,v2,order,mul): r0=v0, r1=v1, r2=v2, r3=order, [sp]=mul; returns the dot product of v0 and v1 in r0 while updating v0 += mul*v2 in place
+function ff_scalarproduct_and_madd_int16_neon, export=1
+        vld1.16         {d28[],d29[]}, [sp]     @ q14 = mul broadcast to all eight s16 lanes (5th arg, on stack)
+        vmov.i16        q0,  #0                 @ q0-q3: four widening (s16->s32) dot-product accumulators
+        vmov.i16        q1,  #0
+        vmov.i16        q2,  #0
+        vmov.i16        q3,  #0
+        mov             r12, r0                 @ r12: write-back pointer into v0 (r0 stays the read pointer)
+
+1:      vld1.16         {d16-d17}, [r0,:128]!   @ q8  = v0[0..7]   (v0 is 16-byte aligned)
+        vld1.16         {d18-d19}, [r1]!        @ q9  = v1[0..7]
+        vld1.16         {d20-d21}, [r2]!        @ q10 = v2[0..7]
+        vld1.16         {d22-d23}, [r0,:128]!   @ q11 = v0[8..15]
+        vld1.16         {d24-d25}, [r1]!        @ q12 = v1[8..15]
+        vld1.16         {d26-d27}, [r2]!        @ q13 = v2[8..15]
+        vmul.s16        q10, q10, q14           @ q10 = v2[0..7]  * mul
+        vmul.s16        q13, q13, q14           @ q13 = v2[8..15] * mul
+        vmlal.s16       q0,  d16, d18           @ q0 += v0[0..3] * v1[0..3]  (original v0, loaded before the madd)
+        vmlal.s16       q1,  d17, d19           @ q1 += v0[4..7] * v1[4..7]
+        vadd.s16        q10, q8,  q10           @ q10 = v0[0..7]  + mul*v2[0..7]   (the madd half)
+        vadd.s16        q13, q11, q13           @ q13 = v0[8..15] + mul*v2[8..15]
+        vmlal.s16       q2,  d22, d24           @ q2 += v0[8..11]  * v1[8..11]
+        vmlal.s16       q3,  d23, d25           @ q3 += v0[12..15] * v1[12..15]
+        vst1.16         {q10}, [r12,:128]!      @ store updated v0[0..7]
+        subs            r3,  r3,  #16           @ order -= 16; 16 samples per iteration, so order is assumed a multiple of 16
+        vst1.16         {q13}, [r12,:128]!      @ store updated v0[8..15]
+        bgt             1b                      @ loop while order > 0
+
+        vpadd.s32       d16, d0,  d1            @ horizontal reduction of the four s32 accumulator pairs
+        vpadd.s32       d17, d2,  d3
+        vpadd.s32       d18, d4,  d5
+        vpadd.s32       d19, d6,  d7
+        vpadd.s32       d0,  d16, d17
+        vpadd.s32       d1,  d18, d19
+        vpadd.s32       d2,  d0,  d1
+        vpaddl.s32      d3,  d2                 @ final pairwise add (widened to s64 in d3)
+        vmov.32         r0,  d3[0]              @ return low 32 bits of the sum in r0
+        bx              lr
+endfunc