author    Loren Merritt <lorenm@u.washington.edu>    2008-08-12 00:26:58 +0000
committer Loren Merritt <lorenm@u.washington.edu>    2008-08-12 00:26:58 +0000
commit    5d0ddd1a9fcdfbb6b24e75af4384e1d36a1d331e (patch)
tree      7395fe9347c87a04885ace06959a8b0c0a940a7e /libavcodec/i386
parent    bafad220a712f9b3a4fe8cdf5f94b79a9c62dd5a (diff)
split-radix FFT
The C version is 1.9x faster than the previous C (on various x86 CPUs); the SSE version is 1.6x faster than the previous SSE.
Originally committed as revision 14698 to svn://svn.ffmpeg.org/ffmpeg/trunk
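For orientation, here is a minimal recursive split-radix DFT in plain C. It is only a readable reference for the recurrence the new kernels implement (one half-size plus two quarter-size sub-transforms combined by a twiddle pass); the patch itself works in place, uses the precomputed ff_cos_* tables, and keeps data in a vector-friendly layout, so this is not a drop-in equivalent. The helper name is hypothetical and n must be a power of two.

/* Reference only: forward DFT, out of place, natural output order. */
#include <complex.h>
#include <math.h>

static void splitradix_ref(complex double *out, const complex double *in,
                           int n, int stride)
{
    if (n == 1) { out[0] = in[0]; return; }
    if (n == 2) {
        out[0] = in[0] + in[stride];
        out[1] = in[0] - in[stride];
        return;
    }
    int n2 = n / 2, n4 = n / 4;
    complex double u[n2], z1[n4], z3[n4];   /* C99 VLAs; fine for a sketch */
    splitradix_ref(u,  in,              n2, 2 * stride);  /* even samples */
    splitradix_ref(z1, in +     stride, n4, 4 * stride);  /* samples 4m+1 */
    splitradix_ref(z3, in + 3 * stride, n4, 4 * stride);  /* samples 4m+3 */
    for (int k = 0; k < n4; k++) {
        complex double w1 = cexp(-2 * M_PI * I *     k / n);
        complex double w3 = cexp(-2 * M_PI * I * 3 * k / n);
        complex double a  = w1 * z1[k] + w3 * z3[k];
        complex double b  = w1 * z1[k] - w3 * z3[k];
        out[k]          = u[k]      + a;   /* the half-size and the two    */
        out[k + n2]     = u[k]      - a;   /* quarter-size sub-transforms  */
        out[k + n4]     = u[k + n4] - I * b; /* combine into the four      */
        out[k + 3 * n4] = u[k + n4] + I * b; /* output quarters            */
    }
}

The same pattern of one n/2 call plus two n/4 calls appears below in the DECL_FFT macro of fft_mmx.asm, where each fft_N calls fft_(N/2) once and fft_(N/4) twice before jumping into a combine pass.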
Diffstat (limited to 'libavcodec/i386')
 -rw-r--r--  libavcodec/i386/fft_3dn.c   | 111
 -rw-r--r--  libavcodec/i386/fft_3dn2.c  | 111
 -rw-r--r--  libavcodec/i386/fft_mmx.asm | 467
 -rw-r--r--  libavcodec/i386/fft_sse.c   | 143
 4 files changed, 523 insertions(+), 309 deletions(-)
diff --git a/libavcodec/i386/fft_3dn.c b/libavcodec/i386/fft_3dn.c
index 8bd7b89d01..6f2e2e8353 100644
--- a/libavcodec/i386/fft_3dn.c
+++ b/libavcodec/i386/fft_3dn.c
@@ -1,7 +1,6 @@
/*
* FFT/MDCT transform with 3DNow! optimizations
- * Copyright (c) 2006 Zuxy MENG Jie, Loren Merritt
- * Based on fft_sse.c copyright (c) 2002 Fabrice Bellard.
+ * Copyright (c) 2008 Loren Merritt
*
* This file is part of FFmpeg.
*
@@ -20,109 +19,5 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "libavutil/x86_cpu.h"
-#include "libavcodec/dsputil.h"
-
-static const int p1m1[2] __attribute__((aligned(8))) =
- { 0, 1 << 31 };
-
-static const int m1p1[2] __attribute__((aligned(8))) =
- { 1 << 31, 0 };
-
-void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z)
-{
- int ln = s->nbits;
- long j;
- x86_reg i;
- long nblocks, nloops;
- FFTComplex *p, *cptr;
-
- asm volatile(
- /* FEMMS is not a must here but recommended by AMD */
- "femms \n\t"
- "movq %0, %%mm7 \n\t"
- ::"m"(*(s->inverse ? m1p1 : p1m1))
- );
-
- i = 8 << ln;
- asm volatile(
- "1: \n\t"
- "sub $32, %0 \n\t"
- "movq (%0,%1), %%mm0 \n\t"
- "movq 16(%0,%1), %%mm1 \n\t"
- "movq 8(%0,%1), %%mm2 \n\t"
- "movq 24(%0,%1), %%mm3 \n\t"
- "movq %%mm0, %%mm4 \n\t"
- "movq %%mm1, %%mm5 \n\t"
- "pfadd %%mm2, %%mm0 \n\t"
- "pfadd %%mm3, %%mm1 \n\t"
- "pfsub %%mm2, %%mm4 \n\t"
- "pfsub %%mm3, %%mm5 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "punpckldq %%mm5, %%mm6 \n\t"
- "punpckhdq %%mm6, %%mm5 \n\t"
- "movq %%mm4, %%mm3 \n\t"
- "pxor %%mm7, %%mm5 \n\t"
- "pfadd %%mm1, %%mm0 \n\t"
- "pfadd %%mm5, %%mm4 \n\t"
- "pfsub %%mm1, %%mm2 \n\t"
- "pfsub %%mm5, %%mm3 \n\t"
- "movq %%mm0, (%0,%1) \n\t"
- "movq %%mm4, 8(%0,%1) \n\t"
- "movq %%mm2, 16(%0,%1) \n\t"
- "movq %%mm3, 24(%0,%1) \n\t"
- "jg 1b \n\t"
- :"+r"(i)
- :"r"(z)
- );
- /* pass 2 .. ln-1 */
-
- nblocks = 1 << (ln-3);
- nloops = 1 << 2;
- cptr = s->exptab1;
- do {
- p = z;
- j = nblocks;
- do {
- i = nloops*8;
- asm volatile(
- "1: \n\t"
- "sub $16, %0 \n\t"
- "movq (%1,%0), %%mm0 \n\t"
- "movq 8(%1,%0), %%mm1 \n\t"
- "movq (%2,%0), %%mm2 \n\t"
- "movq 8(%2,%0), %%mm3 \n\t"
- "movq %%mm2, %%mm4 \n\t"
- "movq %%mm3, %%mm5 \n\t"
- "punpckldq %%mm2, %%mm2 \n\t"
- "punpckldq %%mm3, %%mm3 \n\t"
- "punpckhdq %%mm4, %%mm4 \n\t"
- "punpckhdq %%mm5, %%mm5 \n\t"
- "pfmul (%3,%0,2), %%mm2 \n\t" // cre*re cim*re
- "pfmul 8(%3,%0,2), %%mm3 \n\t"
- "pfmul 16(%3,%0,2), %%mm4 \n\t" // -cim*im cre*im
- "pfmul 24(%3,%0,2), %%mm5 \n\t"
- "pfadd %%mm2, %%mm4 \n\t" // cre*re-cim*im cim*re+cre*im
- "pfadd %%mm3, %%mm5 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "movq %%mm1, %%mm3 \n\t"
- "pfadd %%mm4, %%mm0 \n\t"
- "pfadd %%mm5, %%mm1 \n\t"
- "pfsub %%mm4, %%mm2 \n\t"
- "pfsub %%mm5, %%mm3 \n\t"
- "movq %%mm0, (%1,%0) \n\t"
- "movq %%mm1, 8(%1,%0) \n\t"
- "movq %%mm2, (%2,%0) \n\t"
- "movq %%mm3, 8(%2,%0) \n\t"
- "jg 1b \n\t"
- :"+r"(i)
- :"r"(p), "r"(p + nloops), "r"(cptr)
- );
- p += nloops*2;
- } while (--j);
- cptr += nloops*2;
- nblocks >>= 1;
- nloops <<= 1;
- } while (nblocks != 0);
- asm volatile("femms");
-}
+#define EMULATE_3DNOWEXT
+#include "fft_3dn2.c"
diff --git a/libavcodec/i386/fft_3dn2.c b/libavcodec/i386/fft_3dn2.c
index 9068dff24b..635bc76658 100644
--- a/libavcodec/i386/fft_3dn2.c
+++ b/libavcodec/i386/fft_3dn2.c
@@ -23,105 +23,26 @@
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
-static const int p1m1[2] __attribute__((aligned(8))) =
- { 0, 1 << 31 };
+#ifdef EMULATE_3DNOWEXT
+#define ff_fft_calc_3dn2 ff_fft_calc_3dn
+#define ff_fft_dispatch_3dn2 ff_fft_dispatch_3dn
+#define ff_fft_dispatch_interleave_3dn2 ff_fft_dispatch_interleave_3dn
+#define ff_imdct_calc_3dn2 ff_imdct_calc_3dn
+#define ff_imdct_half_3dn2 ff_imdct_half_3dn
+#endif
-static const int m1p1[2] __attribute__((aligned(8))) =
- { 1 << 31, 0 };
+void ff_fft_dispatch_3dn2(FFTComplex *z, int nbits);
+void ff_fft_dispatch_interleave_3dn2(FFTComplex *z, int nbits);
void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z)
{
- int ln = s->nbits;
- long j;
- x86_reg i;
- long nblocks, nloops;
- FFTComplex *p, *cptr;
-
- asm volatile(
- /* FEMMS is not a must here but recommended by AMD */
- "femms \n\t"
- "movq %0, %%mm7 \n\t"
- ::"m"(*(s->inverse ? m1p1 : p1m1))
- );
-
- i = 8 << ln;
- asm volatile(
- "1: \n\t"
- "sub $32, %0 \n\t"
- "movq (%0,%1), %%mm0 \n\t"
- "movq 16(%0,%1), %%mm1 \n\t"
- "movq 8(%0,%1), %%mm2 \n\t"
- "movq 24(%0,%1), %%mm3 \n\t"
- "movq %%mm0, %%mm4 \n\t"
- "movq %%mm1, %%mm5 \n\t"
- "pfadd %%mm2, %%mm0 \n\t"
- "pfadd %%mm3, %%mm1 \n\t"
- "pfsub %%mm2, %%mm4 \n\t"
- "pfsub %%mm3, %%mm5 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "pswapd %%mm5, %%mm5 \n\t"
- "movq %%mm4, %%mm3 \n\t"
- "pxor %%mm7, %%mm5 \n\t"
- "pfadd %%mm1, %%mm0 \n\t"
- "pfadd %%mm5, %%mm4 \n\t"
- "pfsub %%mm1, %%mm2 \n\t"
- "pfsub %%mm5, %%mm3 \n\t"
- "movq %%mm0, (%0,%1) \n\t"
- "movq %%mm4, 8(%0,%1) \n\t"
- "movq %%mm2, 16(%0,%1) \n\t"
- "movq %%mm3, 24(%0,%1) \n\t"
- "jg 1b \n\t"
- :"+r"(i)
- :"r"(z)
- );
- /* pass 2 .. ln-1 */
-
- nblocks = 1 << (ln-3);
- nloops = 1 << 2;
- cptr = s->exptab1;
- do {
- p = z;
- j = nblocks;
- do {
- i = nloops*8;
- asm volatile(
- "1: \n\t"
- "sub $16, %0 \n\t"
- "movq (%1,%0), %%mm0 \n\t"
- "movq 8(%1,%0), %%mm1 \n\t"
- "movq (%2,%0), %%mm2 \n\t"
- "movq 8(%2,%0), %%mm3 \n\t"
- "movq (%3,%0,2), %%mm4 \n\t"
- "movq 8(%3,%0,2), %%mm5 \n\t"
- "pswapd %%mm4, %%mm6 \n\t" // no need for cptr[2] & cptr[3]
- "pswapd %%mm5, %%mm7 \n\t"
- "pfmul %%mm2, %%mm4 \n\t" // cre*re cim*im
- "pfmul %%mm3, %%mm5 \n\t"
- "pfmul %%mm2, %%mm6 \n\t" // cim*re cre*im
- "pfmul %%mm3, %%mm7 \n\t"
- "pfpnacc %%mm6, %%mm4 \n\t" // cre*re-cim*im cim*re+cre*im
- "pfpnacc %%mm7, %%mm5 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "movq %%mm1, %%mm3 \n\t"
- "pfadd %%mm4, %%mm0 \n\t"
- "pfadd %%mm5, %%mm1 \n\t"
- "pfsub %%mm4, %%mm2 \n\t"
- "pfsub %%mm5, %%mm3 \n\t"
- "movq %%mm0, (%1,%0) \n\t"
- "movq %%mm1, 8(%1,%0) \n\t"
- "movq %%mm2, (%2,%0) \n\t"
- "movq %%mm3, 8(%2,%0) \n\t"
- "jg 1b \n\t"
- :"+r"(i)
- :"r"(p), "r"(p + nloops), "r"(cptr)
- );
- p += nloops*2;
- } while (--j);
- cptr += nloops*2;
- nblocks >>= 1;
- nloops <<= 1;
- } while (nblocks != 0);
+ int n = 1<<s->nbits;
+ int i;
+ ff_fft_dispatch_interleave_3dn2(z, s->nbits);
asm volatile("femms");
+ if(n <= 8)
+ for(i=0; i<n; i+=2)
+ FFSWAP(FFTSample, z[i].im, z[i+1].re);
}
static void imdct_3dn2(MDCTContext *s, const FFTSample *input, FFTSample *tmp)
@@ -162,7 +83,7 @@ static void imdct_3dn2(MDCTContext *s, const FFTSample *input, FFTSample *tmp)
);
}
- ff_fft_calc(&s->fft, z);
+ ff_fft_calc_3dn2(&s->fft, z);
/* post rotation + reordering */
for(k = 0; k < n4; k++) {
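A note on the n <= 8 fixup in ff_fft_calc_3dn2 above: as the comment block at the top of the new fft_mmx.asm explains, the vector kernels keep intermediate results in {2x real, 2x imaginary} blocks for the MMX register size, and the interleave pass normally restores ordinary {re,im} pairs on the way out. Transforms small enough to skip that pass are re-interleaved in C instead; the FFSWAP loop is equivalent to this scalar sketch (hypothetical helper, same FFTComplex/FFTSample types as above):

/* The 3DNow kernels leave each pair of FFTComplex slots as {re0, re1, im0, im1};
 * swapping z[i].im with z[i+1].re restores the interleaved {re, im} layout. */
static void reinterleave_pairs(FFTComplex *z, int n)
{
    for (int i = 0; i < n; i += 2) {
        FFTSample t = z[i].im;     /* currently holds re of element i+1 */
        z[i].im     = z[i + 1].re; /* currently holds im of element i   */
        z[i + 1].re = t;
    }
}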
diff --git a/libavcodec/i386/fft_mmx.asm b/libavcodec/i386/fft_mmx.asm
new file mode 100644
index 0000000000..c0a9bd5b6b
--- /dev/null
+++ b/libavcodec/i386/fft_mmx.asm
@@ -0,0 +1,467 @@
+;******************************************************************************
+;* FFT transform with SSE/3DNow optimizations
+;* Copyright (c) 2008 Loren Merritt
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+; These functions are not individually interchangeable with the C versions.
+; While C takes arrays of FFTComplex, SSE/3DNow leave intermediate results
+; in blocks as convenient to the vector size.
+; i.e. {4x real, 4x imaginary, 4x real, ...} (or 2x respectively)
+
+%include "x86inc.asm"
+
+SECTION_RODATA
+
+%define M_SQRT1_2 0.70710678118654752440
+ps_root2: times 4 dd M_SQRT1_2
+ps_root2mppm: dd -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
+ps_m1p1: dd 1<<31, 0
+
+%assign i 16
+%rep 13
+cextern ff_cos_ %+ i
+%assign i i<<1
+%endrep
+
+%ifdef ARCH_X86_64
+ %define pointer dq
+%else
+ %define pointer dd
+%endif
+
+%macro IF0 1+
+%endmacro
+%macro IF1 1+
+ %1
+%endmacro
+
+section .text align=16
+
+%macro T2_3DN 4 ; z0, z1, mem0, mem1
+ mova %1, %3
+ mova %2, %1
+ pfadd %1, %4
+ pfsub %2, %4
+%endmacro
+
+%macro T4_3DN 6 ; z0, z1, z2, z3, tmp0, tmp1
+ mova %5, %3
+ pfsub %3, %4
+ pfadd %5, %4 ; {t6,t5}
+ pxor %3, [ps_m1p1 GLOBAL] ; {t8,t7}
+ mova %6, %1
+ pswapd %3, %3
+ pfadd %1, %5 ; {r0,i0}
+ pfsub %6, %5 ; {r2,i2}
+ mova %4, %2
+ pfadd %2, %3 ; {r1,i1}
+ pfsub %4, %3 ; {r3,i3}
+ SWAP %3, %6
+%endmacro
+
+; in: %1={r0,i0,r1,i1} %2={r2,i2,r3,i3}
+; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3}
+%macro T4_SSE 3
+ mova %3, %1
+ shufps %1, %2, 0x64 ; {r0,i0,r3,i2}
+ shufps %3, %2, 0xce ; {r1,i1,r2,i3}
+ mova %2, %1
+ addps %1, %3 ; {t1,t2,t6,t5}
+ subps %2, %3 ; {t3,t4,t8,t7}
+ mova %3, %1
+ shufps %1, %2, 0x44 ; {t1,t2,t3,t4}
+ shufps %3, %2, 0xbe ; {t6,t5,t7,t8}
+ mova %2, %1
+ addps %1, %3 ; {r0,i0,r1,i1}
+ subps %2, %3 ; {r2,i2,r3,i3}
+ mova %3, %1
+ shufps %1, %2, 0x88 ; {r0,r1,r2,r3}
+ shufps %3, %2, 0xdd ; {i0,i1,i2,i3}
+ SWAP %2, %3
+%endmacro
+
+%macro T8_SSE 6 ; r0,i0,r1,i1,t0,t1
+ mova %5, %3
+ shufps %3, %4, 0x44 ; {r4,i4,r6,i6}
+ shufps %5, %4, 0xee ; {r5,i5,r7,i7}
+ mova %6, %3
+ subps %3, %5 ; {r5,i5,r7,i7}
+ addps %6, %5 ; {t1,t2,t3,t4}
+ mova %5, %3
+ shufps %5, %5, 0xb1 ; {i5,r5,i7,r7}
+ mulps %3, [ps_root2mppm GLOBAL] ; {-r5,i5,r7,-i7}
+ mulps %5, [ps_root2 GLOBAL]
+ addps %3, %5 ; {t8,t7,ta,t9}
+ mova %5, %6
+ shufps %6, %3, 0x36 ; {t3,t2,t9,t8}
+ shufps %5, %3, 0x9c ; {t1,t4,t7,ta}
+ mova %3, %6
+ addps %6, %5 ; {t1,t2,t9,ta}
+ subps %3, %5 ; {t6,t5,tc,tb}
+ mova %5, %6
+ shufps %6, %3, 0xd8 ; {t1,t9,t5,tb}
+ shufps %5, %3, 0x8d ; {t2,ta,t6,tc}
+ mova %3, %1
+ mova %4, %2
+ addps %1, %6 ; {r0,r1,r2,r3}
+ addps %2, %5 ; {i0,i1,i2,i3}
+ subps %3, %6 ; {r4,r5,r6,r7}
+ subps %4, %5 ; {i4,i5,i6,i7}
+%endmacro
+
+; scheduled for cpu-bound sizes
+%macro PASS_SMALL 3 ; (to load m4-m7), wre, wim
+IF%1 mova m4, Z(4)
+IF%1 mova m5, Z(5)
+ mova m0, %2 ; wre
+ mova m2, m4
+ mova m1, %3 ; wim
+ mova m3, m5
+ mulps m2, m0 ; r2*wre
+IF%1 mova m6, Z(6)
+ mulps m3, m1 ; i2*wim
+IF%1 mova m7, Z(7)
+ mulps m4, m1 ; r2*wim
+ mulps m5, m0 ; i2*wre
+ addps m2, m3 ; r2*wre + i2*wim
+ mova m3, m1
+ mulps m1, m6 ; r3*wim
+ subps m5, m4 ; i2*wre - r2*wim
+ mova m4, m0
+ mulps m3, m7 ; i3*wim
+ mulps m4, m6 ; r3*wre
+ mulps m0, m7 ; i3*wre
+ subps m4, m3 ; r3*wre - i3*wim
+ mova m3, Z(0)
+ addps m0, m1 ; i3*wre + r3*wim
+ mova m1, m4
+ addps m4, m2 ; t5
+ subps m1, m2 ; t3
+ subps m3, m4 ; r2
+ addps m4, Z(0) ; r0
+ mova m6, Z(2)
+ mova Z(4), m3
+ mova Z(0), m4
+ mova m3, m5
+ subps m5, m0 ; t4
+ mova m4, m6
+ subps m6, m5 ; r3
+ addps m5, m4 ; r1
+ mova Z(6), m6
+ mova Z(2), m5
+ mova m2, Z(3)
+ addps m3, m0 ; t6
+ subps m2, m1 ; i3
+ mova m7, Z(1)
+ addps m1, Z(3) ; i1
+ mova Z(7), m2
+ mova Z(3), m1
+ mova m4, m7
+ subps m7, m3 ; i2
+ addps m3, m4 ; i0
+ mova Z(5), m7
+ mova Z(1), m3
+%endmacro
+
+; scheduled to avoid store->load aliasing
+%macro PASS_BIG 1 ; (!interleave)
+ mova m4, Z(4) ; r2
+ mova m5, Z(5) ; i2
+ mova m2, m4
+ mova m0, [wq] ; wre
+ mova m3, m5
+ mova m1, [wq+o1q] ; wim
+ mulps m2, m0 ; r2*wre
+ mova m6, Z(6) ; r3
+ mulps m3, m1 ; i2*wim
+ mova m7, Z(7) ; i3
+ mulps m4, m1 ; r2*wim
+ mulps m5, m0 ; i2*wre
+ addps m2, m3 ; r2*wre + i2*wim
+ mova m3, m1
+ mulps m1, m6 ; r3*wim
+ subps m5, m4 ; i2*wre - r2*wim
+ mova m4, m0
+ mulps m3, m7 ; i3*wim
+ mulps m4, m6 ; r3*wre
+ mulps m0, m7 ; i3*wre
+ subps m4, m3 ; r3*wre - i3*wim
+ mova m3, Z(0)
+ addps m0, m1 ; i3*wre + r3*wim
+ mova m1, m4
+ addps m4, m2 ; t5
+ subps m1, m2 ; t3
+ subps m3, m4 ; r2
+ addps m4, Z(0) ; r0
+ mova m6, Z(2)
+ mova Z(4), m3
+ mova Z(0), m4
+ mova m3, m5
+ subps m5, m0 ; t4
+ mova m4, m6
+ subps m6, m5 ; r3
+ addps m5, m4 ; r1
+IF%1 mova Z(6), m6
+IF%1 mova Z(2), m5
+ mova m2, Z(3)
+ addps m3, m0 ; t6
+ subps m2, m1 ; i3
+ mova m7, Z(1)
+ addps m1, Z(3) ; i1
+IF%1 mova Z(7), m2
+IF%1 mova Z(3), m1
+ mova m4, m7
+ subps m7, m3 ; i2
+ addps m3, m4 ; i0
+IF%1 mova Z(5), m7
+IF%1 mova Z(1), m3
+%if %1==0
+ mova m4, m5 ; r1
+ mova m0, m6 ; r3
+ unpcklps m5, m1
+ unpckhps m4, m1
+ unpcklps m6, m2
+ unpckhps m0, m2
+ mova m1, Z(0)
+ mova m2, Z(4)
+ mova Z(2), m5
+ mova Z(3), m4
+ mova Z(6), m6
+ mova Z(7), m0
+ mova m5, m1 ; r0
+ mova m4, m2 ; r2
+ unpcklps m1, m3
+ unpckhps m5, m3
+ unpcklps m2, m7
+ unpckhps m4, m7
+ mova Z(0), m1
+ mova Z(1), m5
+ mova Z(4), m2
+ mova Z(5), m4
+%endif
+%endmacro
+
+%macro PUNPCK 3
+ mova %3, %1
+ punpckldq %1, %2
+ punpckhdq %3, %2
+%endmacro
+
+INIT_XMM
+
+%define Z(x) [r0+mmsize*x]
+
+align 16
+fft4_sse:
+ mova m0, Z(0)
+ mova m1, Z(1)
+ T4_SSE m0, m1, m2
+ mova Z(0), m0
+ mova Z(1), m1
+ ret
+
+align 16
+fft8_sse:
+ mova m0, Z(0)
+ mova m1, Z(1)
+ T4_SSE m0, m1, m2
+ mova m2, Z(2)
+ mova m3, Z(3)
+ T8_SSE m0, m1, m2, m3, m4, m5
+ mova Z(0), m0
+ mova Z(1), m1
+ mova Z(2), m2
+ mova Z(3), m3
+ ret
+
+align 16
+fft16_sse:
+ mova m0, Z(0)
+ mova m1, Z(1)
+ T4_SSE m0, m1, m2
+ mova m2, Z(2)
+ mova m3, Z(3)
+ T8_SSE m0, m1, m2, m3, m4, m5
+ mova m4, Z(4)
+ mova m5, Z(5)
+ mova Z(0), m0
+ mova Z(1), m1
+ mova Z(2), m2
+ mova Z(3), m3
+ T4_SSE m4, m5, m6
+ mova m6, Z(6)
+ mova m7, Z(7)
+ T4_SSE m6, m7, m0
+ PASS_SMALL 0, [ff_cos_16 GLOBAL], [ff_cos_16+16 GLOBAL]
+ ret
+
+
+INIT_MMX
+
+%macro FFT48_3DN 1
+align 16
+fft4%1:
+ T2_3DN m0, m1, Z(0), Z(1)
+ mova m2, Z(2)
+ mova m3, Z(3)
+ T4_3DN m0, m1, m2, m3, m4, m5
+ PUNPCK m0, m1, m4
+ PUNPCK m2, m3, m5
+ mova Z(0), m0
+ mova Z(1), m4
+ mova Z(2), m2
+ mova Z(3), m5
+ ret
+
+align 16
+fft8%1:
+ T2_3DN m0, m1, Z(0), Z(1)
+ mova m2, Z(2)
+ mova m3, Z(3)
+ T4_3DN m0, m1, m2, m3, m4, m5
+ mova Z(0), m0
+ mova Z(2), m2
+ T2_3DN m4, m5, Z(4), Z(5)
+ T2_3DN m6, m7, Z(6), Z(7)
+ pswapd m0, m5
+ pswapd m2, m7
+ pxor m0, [ps_m1p1 GLOBAL]
+ pxor m2, [ps_m1p1 GLOBAL]
+ pfsub m5, m0
+ pfadd m7, m2
+ pfmul m5, [ps_root2 GLOBAL]
+ pfmul m7, [ps_root2 GLOBAL]
+ T4_3DN m1, m3, m5, m7, m0, m2
+ mova Z(5), m5
+ mova Z(7), m7
+ mova m0, Z(0)
+ mova m2, Z(2)
+ T4_3DN m0, m2, m4, m6, m5, m7
+ PUNPCK m0, m1, m5
+ PUNPCK m2, m3, m7
+ mova Z(0), m0
+ mova Z(1), m5
+ mova Z(2), m2
+ mova Z(3), m7
+ PUNPCK m4, Z(5), m5
+ PUNPCK m6, Z(7), m7
+ mova Z(4), m4
+ mova Z(5), m5
+ mova Z(6), m6
+ mova Z(7), m7
+ ret
+%endmacro
+
+FFT48_3DN _3dn2
+
+%macro pswapd 2
+%ifidn %1, %2
+ movd [r0+12], %1
+ punpckhdq %1, [r0+8]
+%else
+ movq %1, %2
+ psrlq %1, 32
+ punpckldq %1, %2
+%endif
+%endmacro
+
+FFT48_3DN _3dn
+
+
+%define Z(x) [zq + o1q*(x&6)*((x/6)^1) + o3q*(x/6) + mmsize*(x&1)]
+
+%macro DECL_PASS 2+ ; name, payload
+align 16
+%1:
+DEFINE_ARGS z, w, n, o1, o3
+ lea o3q, [nq*3]
+ lea o1q, [nq*8]
+ shl o3q, 4
+.loop:
+ %2
+ add zq, mmsize*2
+ add wq, mmsize
+ sub nd, mmsize/8
+ jg .loop
+ rep ret
+%endmacro
+
+INIT_XMM
+DECL_PASS pass_sse, PASS_BIG 1
+DECL_PASS pass_interleave_sse, PASS_BIG 0
+
+INIT_MMX
+%define mulps pfmul
+%define addps pfadd
+%define subps pfsub
+%define unpcklps punpckldq
+%define unpckhps punpckhdq
+DECL_PASS pass_3dn, PASS_SMALL 1, [wq], [wq+o1q]
+DECL_PASS pass_interleave_3dn, PASS_BIG 0
+%define pass_3dn2 pass_3dn
+%define pass_interleave_3dn2 pass_interleave_3dn
+
+
+%macro DECL_FFT 2-3 ; nbits, cpu, suffix
+%xdefine list_of_fft fft4%2, fft8%2
+%if %1==5
+%xdefine list_of_fft list_of_fft, fft16%2
+%endif
+
+%assign n 1<<%1
+%rep 17-%1
+%assign n2 n/2
+%assign n4 n/4
+%xdefine list_of_fft list_of_fft, fft %+ n %+ %3%2
+
+align 16
+fft %+ n %+ %3%2:
+ call fft %+ n2 %+ %2
+ add r0, n*4 - (n&(-2<<%1))
+ call fft %+ n4 %+ %2
+ add r0, n*2 - (n2&(-2<<%1))
+ call fft %+ n4 %+ %2
+ sub r0, n*6 + (n2&(-2<<%1))
+ lea r1, [ff_cos_ %+ n GLOBAL]
+ mov r2d, n4/2
+ jmp pass%3%2
+
+%assign n n*2
+%endrep
+%undef n
+
+align 8
+dispatch_tab%3%2: pointer list_of_fft
+
+; On x86_32, this function does the register saving and restoring for all of fft.
+; The others pass args in registers and don't spill anything.
+cglobal ff_fft_dispatch%3%2, 2,5,0, z, nbits
+ lea r2, [dispatch_tab%3%2 GLOBAL]
+ mov r2, [r2 + (nbitsq-2)*gprsize]
+ call r2
+ RET
+%endmacro ; DECL_FFT
+
+DECL_FFT 5, _sse
+DECL_FFT 5, _sse, _interleave
+DECL_FFT 4, _3dn
+DECL_FFT 4, _3dn, _interleave
+DECL_FFT 4, _3dn2
+DECL_FFT 4, _3dn2, _interleave
+
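The DECL_FFT / ff_fft_dispatch machinery above boils down to a table of transform entry points indexed by transform size: dispatch_tab holds fft4, fft8 (plus fft16 for SSE), then fft32 through fft65536, and the cglobal wrapper loads the slot at nbits-2 and calls it. Roughly, in C terms (illustrative names only; FFTComplex comes from dsputil.h):

/* C model of the asm dispatch: tab[0] handles n = 4 (nbits == 2),
 * tab[1] handles n = 8, and so on up to the largest generated size. */
typedef void (*fft_fn)(FFTComplex *z);

static void fft_dispatch_model(FFTComplex *z, int nbits, const fft_fn *tab)
{
    tab[nbits - 2](z);
}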
diff --git a/libavcodec/i386/fft_sse.c b/libavcodec/i386/fft_sse.c
index 305f44a0ce..77a579011a 100644
--- a/libavcodec/i386/fft_sse.c
+++ b/libavcodec/i386/fft_sse.c
@@ -22,124 +22,55 @@
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
-static const int p1p1p1m1[4] __attribute__((aligned(16))) =
- { 0, 0, 0, 1 << 31 };
-
-static const int p1p1m1p1[4] __attribute__((aligned(16))) =
- { 0, 0, 1 << 31, 0 };
-
-static const int p1p1m1m1[4] __attribute__((aligned(16))) =
- { 0, 0, 1 << 31, 1 << 31 };
-
static const int p1m1p1m1[4] __attribute__((aligned(16))) =
{ 0, 1 << 31, 0, 1 << 31 };
static const int m1m1m1m1[4] __attribute__((aligned(16))) =
{ 1 << 31, 1 << 31, 1 << 31, 1 << 31 };
-#if 0
-static void print_v4sf(const char *str, __m128 a)
-{
- float *p = (float *)&a;
- printf("%s: %f %f %f %f\n",
- str, p[0], p[1], p[2], p[3]);
-}
-#endif
+void ff_fft_dispatch_sse(FFTComplex *z, int nbits);
+void ff_fft_dispatch_interleave_sse(FFTComplex *z, int nbits);
-/* XXX: handle reverse case */
void ff_fft_calc_sse(FFTContext *s, FFTComplex *z)
{
- int ln = s->nbits;
- x86_reg i;
- long j;
- long nblocks, nloops;
- FFTComplex *p, *cptr;
+ int n = 1 << s->nbits;
- asm volatile(
- "movaps %0, %%xmm4 \n\t"
- "movaps %1, %%xmm5 \n\t"
- ::"m"(*p1p1m1m1),
- "m"(*(s->inverse ? p1p1m1p1 : p1p1p1m1))
- );
+ ff_fft_dispatch_interleave_sse(z, s->nbits);
- i = 8 << ln;
- asm volatile(
- "1: \n\t"
- "sub $32, %0 \n\t"
- /* do the pass 0 butterfly */
- "movaps (%0,%1), %%xmm0 \n\t"
- "movaps %%xmm0, %%xmm1 \n\t"
- "shufps $0x4E, %%xmm0, %%xmm0 \n\t"
- "xorps %%xmm4, %%xmm1 \n\t"
- "addps %%xmm1, %%xmm0 \n\t"
- "movaps 16(%0,%1), %%xmm2 \n\t"
- "movaps %%xmm2, %%xmm3 \n\t"
- "shufps $0x4E, %%xmm2, %%xmm2 \n\t"
- "xorps %%xmm4, %%xmm3 \n\t"
- "addps %%xmm3, %%xmm2 \n\t"
- /* multiply third by -i */
- /* by toggling the sign bit */
- "shufps $0xB4, %%xmm2, %%xmm2 \n\t"
- "xorps %%xmm5, %%xmm2 \n\t"
- /* do the pass 1 butterfly */
- "movaps %%xmm0, %%xmm1 \n\t"
- "addps %%xmm2, %%xmm0 \n\t"
- "subps %%xmm2, %%xmm1 \n\t"
- "movaps %%xmm0, (%0,%1) \n\t"
- "movaps %%xmm1, 16(%0,%1) \n\t"
- "jg 1b \n\t"
- :"+r"(i)
- :"r"(z)
- );
- /* pass 2 .. ln-1 */
+ if(n <= 16) {
+ x86_reg i = -8*n;
+ asm volatile(
+ "1: \n"
+ "movaps (%0,%1), %%xmm0 \n"
+ "movaps %%xmm0, %%xmm1 \n"
+ "unpcklps 16(%0,%1), %%xmm0 \n"
+ "unpckhps 16(%0,%1), %%xmm1 \n"
+ "movaps %%xmm0, (%0,%1) \n"
+ "movaps %%xmm1, 16(%0,%1) \n"
+ "add $32, %0 \n"
+ "jl 1b \n"
+ :"+r"(i)
+ :"r"(z+n)
+ :"memory"
+ );
+ }
+}
- nblocks = 1 << (ln-3);
- nloops = 1 << 2;
- cptr = s->exptab1;
- do {
- p = z;
- j = nblocks;
- do {
- i = nloops*8;
- asm volatile(
- "1: \n\t"
- "sub $32, %0 \n\t"
- "movaps (%2,%0), %%xmm1 \n\t"
- "movaps (%1,%0), %%xmm0 \n\t"
- "movaps 16(%2,%0), %%xmm5 \n\t"
- "movaps 16(%1,%0), %%xmm4 \n\t"
- "movaps %%xmm1, %%xmm2 \n\t"
- "movaps %%xmm5, %%xmm6 \n\t"
- "shufps $0xA0, %%xmm1, %%xmm1 \n\t"
- "shufps $0xF5, %%xmm2, %%xmm2 \n\t"
- "shufps $0xA0, %%xmm5, %%xmm5 \n\t"
- "shufps $0xF5, %%xmm6, %%xmm6 \n\t"
- "mulps (%3,%0,2), %%xmm1 \n\t" // cre*re cim*re
- "mulps 16(%3,%0,2), %%xmm2 \n\t" // -cim*im cre*im
- "mulps 32(%3,%0,2), %%xmm5 \n\t" // cre*re cim*re
- "mulps 48(%3,%0,2), %%xmm6 \n\t" // -cim*im cre*im
- "addps %%xmm2, %%xmm1 \n\t"
- "addps %%xmm6, %%xmm5 \n\t"
- "movaps %%xmm0, %%xmm3 \n\t"
- "movaps %%xmm4, %%xmm7 \n\t"
- "addps %%xmm1, %%xmm0 \n\t"
- "subps %%xmm1, %%xmm3 \n\t"
- "addps %%xmm5, %%xmm4 \n\t"
- "subps %%xmm5, %%xmm7 \n\t"
- "movaps %%xmm0, (%1,%0) \n\t"
- "movaps %%xmm3, (%2,%0) \n\t"
- "movaps %%xmm4, 16(%1,%0) \n\t"
- "movaps %%xmm7, 16(%2,%0) \n\t"
- "jg 1b \n\t"
- :"+r"(i)
- :"r"(p), "r"(p + nloops), "r"(cptr)
- );
- p += nloops*2;
- } while (--j);
- cptr += nloops*2;
- nblocks >>= 1;
- nloops <<= 1;
- } while (nblocks != 0);
+void ff_fft_permute_sse(FFTContext *s, FFTComplex *z)
+{
+ int n = 1 << s->nbits;
+ int i;
+ for(i=0; i<n; i+=2) {
+ asm volatile(
+ "movaps %2, %%xmm0 \n"
+ "movlps %%xmm0, %0 \n"
+ "movhps %%xmm0, %1 \n"
+ :"=m"(s->tmp_buf[s->revtab[i]]),
+ "=m"(s->tmp_buf[s->revtab[i+1]])
+ :"m"(z[i])
+ );
+ }
+ memcpy(z, s->tmp_buf, n*sizeof(FFTComplex));
}
static void imdct_sse(MDCTContext *s, const FFTSample *input, FFTSample *tmp)
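For comparison, the ff_fft_permute_sse routine shown above has a straightforward scalar counterpart; the SSE version only differs in moving two elements per iteration with movlps/movhps. A sketch (hypothetical helper, relying on the same revtab and tmp_buf fields of FFTContext used above):

/* Scatter each element to its bit-reversed slot in tmp_buf, then copy the
 * permuted buffer back over z. Needs <string.h> for memcpy. */
static void fft_permute_c(FFTContext *s, FFTComplex *z)
{
    int n = 1 << s->nbits;
    for (int i = 0; i < n; i++)
        s->tmp_buf[s->revtab[i]] = z[i];
    memcpy(z, s->tmp_buf, n * sizeof(*z));
}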