about summary refs log tree commit diff stats
path: root/libavutil/x86/float_dsp.asm
diff options
context:
space:
mode:
authorJames Almer <jamrial@gmail.com>2014-04-16 02:09:36 -0300
committerMichael Niedermayer <michaelni@gmx.at>2014-04-16 18:36:52 +0200
commit11b36b1ee051e53c815114f8c258e23166ea9ed1 (patch)
treeb83aee02b5cba539bce45a351c1fbd810e38706a /libavutil/x86/float_dsp.asm
parent27f184ef40e8da09d9dfc8f71a8d1893729381f6 (diff)
downloadffmpeg-11b36b1ee051e53c815114f8c258e23166ea9ed1.tar.gz
x86/float_dsp: unroll loop in vector_fmac_scalar
~6% faster SSE2 performance. AVX/FMA3 are unaffected. Signed-off-by: James Almer <jamrial@gmail.com> Reviewed-by: Christophe Gisquet <christophe.gisquet@gmail.com> Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavutil/x86/float_dsp.asm')
-rw-r--r-- libavutil/x86/float_dsp.asm | 44
1 file changed, 26 insertions, 18 deletions
diff --git a/libavutil/x86/float_dsp.asm b/libavutil/x86/float_dsp.asm
index 01ac60eb0a..8d236ef7af 100644
--- a/libavutil/x86/float_dsp.asm
+++ b/libavutil/x86/float_dsp.asm
@@ -61,9 +61,9 @@ VECTOR_FMUL
%macro VECTOR_FMAC_SCALAR 0
%if UNIX64
-cglobal vector_fmac_scalar, 3,3,3, dst, src, len
+cglobal vector_fmac_scalar, 3,3,5, dst, src, len
%else
-cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
+cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
%endif
%if ARCH_X86_32
VBROADCASTSS m0, mulm
@@ -78,23 +78,31 @@ cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
%endif
lea lenq, [lend*4-64]
.loop:
-%assign a 0
-%rep 32/mmsize
%if cpuflag(fma3)
- mova m1, [dstq+lenq+(a+0)*mmsize]
- mova m2, [dstq+lenq+(a+1)*mmsize]
- fmaddps m1, m0, [srcq+lenq+(a+0)*mmsize], m1
- fmaddps m2, m0, [srcq+lenq+(a+1)*mmsize], m2
-%else
- mulps m1, m0, [srcq+lenq+(a+0)*mmsize]
- mulps m2, m0, [srcq+lenq+(a+1)*mmsize]
- addps m1, m1, [dstq+lenq+(a+0)*mmsize]
- addps m2, m2, [dstq+lenq+(a+1)*mmsize]
-%endif
- mova [dstq+lenq+(a+0)*mmsize], m1
- mova [dstq+lenq+(a+1)*mmsize], m2
-%assign a a+2
-%endrep
+ mova m1, [dstq+lenq]
+ mova m2, [dstq+lenq+1*mmsize]
+ fmaddps m1, m0, [srcq+lenq], m1
+ fmaddps m2, m0, [srcq+lenq+1*mmsize], m2
+%else ; cpuflag
+ mulps m1, m0, [srcq+lenq]
+ mulps m2, m0, [srcq+lenq+1*mmsize]
+%if mmsize < 32
+ mulps m3, m0, [srcq+lenq+2*mmsize]
+ mulps m4, m0, [srcq+lenq+3*mmsize]
+%endif ; mmsize
+ addps m1, m1, [dstq+lenq]
+ addps m2, m2, [dstq+lenq+1*mmsize]
+%if mmsize < 32
+ addps m3, m3, [dstq+lenq+2*mmsize]
+ addps m4, m4, [dstq+lenq+3*mmsize]
+%endif ; mmsize
+%endif ; cpuflag
+ mova [dstq+lenq], m1
+ mova [dstq+lenq+1*mmsize], m2
+%if mmsize < 32
+ mova [dstq+lenq+2*mmsize], m3
+ mova [dstq+lenq+3*mmsize], m4
+%endif ; mmsize
sub lenq, 64
jge .loop
REP_RET