author     James Almer <jamrial@gmail.com>      2014-03-20 15:37:55 -0300
committer  Anton Khirnov <anton@khirnov.net>    2014-04-04 17:40:51 +0200
commit     81e02fae6ec12ae85f052bd74aa9506cbebe4517 (patch)
tree       91f0eaa649b2c07114620f50cee00f385f3272c9 /libavcodec/x86
parent     2025d8026fe507c3f82c3440ecdf09641c577163 (diff)
x86/synth_filter: add synth_filter_avx
Sandy Bridge Win64:
180 cycles in ff_synth_filter_inner_sse2
150 cycles in ff_synth_filter_inner_avx
Also switch some instructions to the three-operand format to avoid
assembly errors with Yasm 1.1.0 or older; the two forms are illustrated
in the short sketch after the sign-offs.
Signed-off-by: James Almer <jamrial@gmail.com>
Signed-off-by: Anton Khirnov <anton@khirnov.net>
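
For context on the two-operand vs. three-operand note above, a minimal sketch
(not part of the patch): the register numbers and memory operands are arbitrary
placeholders, and the x86inc macro behaviour described in the comments is the
general FFmpeg convention rather than anything specific to this file.

    ; Two-operand (destructive) SSE style: the destination is implicitly
    ; also the first source operand.
        mulps   m1, m5              ; m1 = m1 * m5

    ; Explicit three-operand style, as used throughout this patch.  Under
    ; INIT_XMM it assembles to the same legacy SSE instruction (since the
    ; destination and first source match); under INIT_YMM avx the x86inc
    ; macros emit the VEX-encoded, non-destructive form instead.
        mulps   m1, m1, m5          ; m1 = m1 * m5
        ; roughly equivalent raw AVX spelling:
        ; vmulps  ymm1, ymm1, ymm5

In the diff below the same change appears as, for example,
"mulps m6, [win + %1 + j + 16 * 4]" becoming "mulps m6, m6, [win + %1 + j + 16 * 4]".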
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--  libavcodec/x86/dcadsp.asm     | 85
-rw-r--r--  libavcodec/x86/dcadsp_init.c  |  4
2 files changed, 57 insertions, 32 deletions
diff --git a/libavcodec/x86/dcadsp.asm b/libavcodec/x86/dcadsp.asm
index 970ec3dacb..662cb96f66 100644
--- a/libavcodec/x86/dcadsp.asm
+++ b/libavcodec/x86/dcadsp.asm
@@ -200,18 +200,22 @@ DCA_LFE_FIR 0
 DCA_LFE_FIR 1
 
 %macro SETZERO 1
-%if cpuflag(sse2)
+%if cpuflag(sse2) && notcpuflag(avx)
     pxor          %1, %1
 %else
     xorps         %1, %1, %1
 %endif
 %endmacro
 
-%macro SHUF 2
-%if cpuflag(sse2)
-    pshufd        %1, %2, q0123
+%macro SHUF 3
+%if cpuflag(avx)
+    mova          %3, [%2 - 16]
+    vperm2f128    %1, %3, %3, 1
+    vshufps       %1, %1, %1, q0123
+%elif cpuflag(sse2)
+    pshufd        %1, [%2], q0123
 %else
-    mova          %1, %2
+    mova          %1, [%2]
     shufps        %1, %1, q0123
 %endif
 %endmacro
@@ -220,43 +224,43 @@ DCA_LFE_FIR 1
     ; reading backwards:  ptr1 = synth_buf + j + i; ptr2 = synth_buf + j - i
     ;~ a += window[i + j]      * (-synth_buf[15 - i + j])
     ;~ b += window[i + j + 16] * (synth_buf[i + j])
-    SHUF          m5,  [ptr2 + j + (15 - 3) * 4]
+    SHUF          m5,  ptr2 + j + (15 - 3) * 4, m6
     mova          m6,  [ptr1 + j]
 %if ARCH_X86_64
-    SHUF          m11, [ptr2 + j + (15 - 3) * 4 - mmsize]
+    SHUF          m11, ptr2 + j + (15 - 3) * 4 - mmsize, m12
     mova          m12, [ptr1 + j + mmsize]
 %endif
-    mulps         m6,  [win + %1 + j + 16 * 4]
-    mulps         m5,  [win + %1 + j]
+    mulps         m6, m6,   [win + %1 + j + 16 * 4]
+    mulps         m5, m5,   [win + %1 + j]
 %if ARCH_X86_64
-    mulps         m12, [win + %1 + j + mmsize + 16 * 4]
-    mulps         m11, [win + %1 + j + mmsize]
+    mulps         m12, m12, [win + %1 + j + mmsize + 16 * 4]
+    mulps         m11, m11, [win + %1 + j + mmsize]
 %endif
-    addps         m2,  m6
-    subps         m1,  m5
+    addps         m2, m2,   m6
+    subps         m1, m1,   m5
 %if ARCH_X86_64
-    addps         m8,  m12
-    subps         m7,  m11
+    addps         m8, m8,   m12
+    subps         m7, m7,   m11
 %endif
     ;~ c += window[i + j + 32] * (synth_buf[16 + i + j])
     ;~ d += window[i + j + 48] * (synth_buf[31 - i + j])
-    SHUF          m6,  [ptr2 + j + (31 - 3) * 4]
+    SHUF          m6,  ptr2 + j + (31 - 3) * 4, m5
     mova          m5,  [ptr1 + j + 16 * 4]
 %if ARCH_X86_64
-    SHUF          m12, [ptr2 + j + (31 - 3) * 4 - mmsize]
+    SHUF          m12, ptr2 + j + (31 - 3) * 4 - mmsize, m11
     mova          m11, [ptr1 + j + mmsize + 16 * 4]
 %endif
-    mulps         m5,  [win + %1 + j + 32 * 4]
-    mulps         m6,  [win + %1 + j + 48 * 4]
+    mulps         m5, m5,   [win + %1 + j + 32 * 4]
+    mulps         m6, m6,   [win + %1 + j + 48 * 4]
 %if ARCH_X86_64
-    mulps         m11, [win + %1 + j + mmsize + 32 * 4]
-    mulps         m12, [win + %1 + j + mmsize + 48 * 4]
+    mulps         m11, m11, [win + %1 + j + mmsize + 32 * 4]
+    mulps         m12, m12, [win + %1 + j + mmsize + 48 * 4]
 %endif
-    addps         m3,  m5
-    addps         m4,  m6
+    addps         m3, m3,   m5
+    addps         m4, m4,   m6
 %if ARCH_X86_64
-    addps         m9,  m11
-    addps         m10, m12
+    addps         m9, m9,   m11
+    addps         m10, m10, m12
 %endif
     sub            j, 64 * 4
 %endmacro
@@ -269,25 +273,34 @@ cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
                               synth_buf, synth_buf2, window, out, off, scale
 %define scale m0
 %if ARCH_X86_32 || WIN64
-%if cpuflag(sse2)
+%if cpuflag(sse2) && notcpuflag(avx)
     movd       scale, scalem
+    SPLATD        m0
 %else
-    movss      scale, scalem
+    VBROADCASTSS  m0, scalem
 %endif
 ; Make sure offset is in a register and not on the stack
 %define OFFQ  r4q
 %else
+    SPLATD      xmm0
+%if cpuflag(avx)
+    vinsertf128   m0, m0, xmm0, 1
+%endif
 %define OFFQ  offq
 %endif
-    SPLATD      m0
     ; prepare inner counter limit 1
     mov          r5q, 480
     sub          r5q, offmp
     and          r5q, -64
     shl          r5q, 2
+%if ARCH_X86_32 || notcpuflag(avx)
     mov         OFFQ, r5q
 %define i        r5q
     mov            i, 16 * 4 - (ARCH_X86_64 + 1) * mmsize ; main loop counter
+%else
+%define i 0
+%define OFFQ     r5q
+%endif
 
 %define buf2     synth_buf2q
 %if ARCH_X86_32
@@ -306,8 +319,10 @@ cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
 %define j        r3q
     mov          win, windowm
     mov         ptr1, synth_bufm
+%if ARCH_X86_32 || notcpuflag(avx)
     add          win, i
     add         ptr1, i
+%endif
 %else ; ARCH_X86_64
 %define ptr1     r6q
 %define ptr2     r7q ; must be loaded
@@ -323,7 +338,9 @@ cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
     mov         ptr2, synth_bufmp
     ; prepare the inner loop counter
     mov            j, OFFQ
+%if ARCH_X86_32 || notcpuflag(avx)
     sub         ptr2, i
+%endif
 .loop1:
     INNER_LOOP  0
     jge .loop1
@@ -346,11 +363,11 @@ cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
 %endif
     ;~ out[i]      = a * scale;
     ;~ out[i + 16] = b * scale;
-    mulps         m1, scale
-    mulps         m2, scale
+    mulps         m1, m1, scale
+    mulps         m2, m2, scale
 %if ARCH_X86_64
-    mulps         m7, scale
-    mulps         m8, scale
+    mulps         m7, m7, scale
+    mulps         m8, m8, scale
 %endif
     ;~ synth_buf2[i]      = c;
     ;~ synth_buf2[i + 16] = d;
@@ -368,8 +385,10 @@ cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
     mova  [outq + i +  0 * 4 + mmsize], m7
     mova  [outq + i + 16 * 4 + mmsize], m8
 %endif
+%if ARCH_X86_32 || notcpuflag(avx)
     sub            i, (ARCH_X86_64 + 1) * mmsize
     jge .mainloop
+%endif
     RET
 %endmacro
 
@@ -379,3 +398,5 @@ SYNTH_FILTER
 %endif
 INIT_XMM sse2
 SYNTH_FILTER
+INIT_YMM avx
+SYNTH_FILTER
diff --git a/libavcodec/x86/dcadsp_init.c b/libavcodec/x86/dcadsp_init.c
index 5b77985569..d7e0d65917 100644
--- a/libavcodec/x86/dcadsp_init.c
+++ b/libavcodec/x86/dcadsp_init.c
@@ -81,6 +81,7 @@ static void synth_filter_##opt(FFTContext *imdct, \
 SYNTH_FILTER_FUNC(sse)
 #endif
 SYNTH_FILTER_FUNC(sse2)
+SYNTH_FILTER_FUNC(avx)
 #endif /* HAVE_YASM */
 
 av_cold void ff_synth_filter_init_x86(SynthFilterContext *s)
@@ -96,5 +97,8 @@ av_cold void ff_synth_filter_init_x86(SynthFilterContext *s)
     if (EXTERNAL_SSE2(cpu_flags)) {
         s->synth_filter_float = synth_filter_sse2;
     }
+    if (EXTERNAL_AVX(cpu_flags)) {
+        s->synth_filter_float = synth_filter_avx;
+    }
 #endif /* HAVE_YASM */
 }