author     Michael Niedermayer <michaelni@gmx.at>    2013-11-04 19:34:04 +0100
committer  Michael Niedermayer <michaelni@gmx.at>    2013-11-04 19:34:04 +0100
commit     8528feb13cd320716478a5033919cb7656b41863 (patch)
tree       d045e7816f00d2f2377f17f9cbde0372f591a056
parent     2e57d4ba24b1c7806375ef05a56c3627a97d21db (diff)
parent     d2f4846591727fedcc2b452b688da8da09ee8305 (diff)
download   ffmpeg-8528feb13cd320716478a5033919cb7656b41863.tar.gz

Merge commit 'd2f4846591727fedcc2b452b688da8da09ee8305' into release/0.10
* commit 'd2f4846591727fedcc2b452b688da8da09ee8305':
  Prepare for 0.8.7 Release
  x86: fft: Remove 3DNow! optimizations, they break FATE
  x86: ac3dsp: Drop mmx variant of ac3_max_msb_abs_int16

Conflicts:
	RELEASE
	libavcodec/x86/fft_3dn.c
	libavcodec/x86/fft_3dn2.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
-rw-r--r--  libavcodec/x86/Makefile      |   2
-rw-r--r--  libavcodec/x86/ac3dsp.asm    |   9
-rw-r--r--  libavcodec/x86/ac3dsp_mmx.c  |   2
-rw-r--r--  libavcodec/x86/fft.c         |  10
-rw-r--r--  libavcodec/x86/fft_3dn.c     |  23
-rw-r--r--  libavcodec/x86/fft_3dn2.c    | 174
6 files changed, 0 insertions, 220 deletions
diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index 1066b20139..2d28b299a1 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -3,8 +3,6 @@ OBJS-$(CONFIG_TRUEHD_DECODER) += x86/mlpdsp.o
YASM-OBJS-$(CONFIG_DCT) += x86/dct32_sse.o
-YASM-OBJS-FFT-$(HAVE_AMD3DNOW) += x86/fft_3dn.o
-YASM-OBJS-FFT-$(HAVE_AMD3DNOWEXT) += x86/fft_3dn2.o
YASM-OBJS-FFT-$(HAVE_SSE) += x86/fft_sse.o
YASM-OBJS-$(CONFIG_FFT) += x86/fft_mmx.o \
$(YASM-OBJS-FFT-yes)
diff --git a/libavcodec/x86/ac3dsp.asm b/libavcodec/x86/ac3dsp.asm
index 300660dc5d..26f5616b1d 100644
--- a/libavcodec/x86/ac3dsp.asm
+++ b/libavcodec/x86/ac3dsp.asm
@@ -86,7 +86,6 @@ AC3_EXPONENT_MIN sse2
; This function uses 2 different methods to calculate a valid result.
; 1) logical 'or' of abs of each element
; This is used for ssse3 because of the pabsw instruction.
-; It is also used for mmx because of the lack of min/max instructions.
; 2) calculate min/max for the array, then or(abs(min),abs(max))
; This is used for mmxext and sse2 because they have pminsw/pmaxsw.
;-----------------------------------------------------------------------------
@@ -104,15 +103,9 @@ cglobal ac3_max_msb_abs_int16_%1, 2,2,5, src, len
pmaxsw m3, m0
pmaxsw m3, m1
%else ; or_abs
-%ifidn %1, mmx
- mova m0, [srcq]
- mova m1, [srcq+mmsize]
- ABS2 m0, m1, m3, m4
-%else ; ssse3
; using memory args is faster for ssse3
pabsw m0, [srcq]
pabsw m1, [srcq+mmsize]
-%endif
por m2, m0
por m2, m1
%endif
@@ -137,9 +130,7 @@ cglobal ac3_max_msb_abs_int16_%1, 2,2,5, src, len
%endmacro
INIT_MMX
-%define ABS2 ABS2_MMX
%define PSHUFLW pshufw
-AC3_MAX_MSB_ABS_INT16 mmx, or_abs
%define ABS2 ABS2_MMX2
AC3_MAX_MSB_ABS_INT16 mmxext, min_max
INIT_XMM
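
Note: the comment block kept in the ac3dsp.asm hunk above describes two ways of producing a value whose most significant bit matches the largest |src[i]|: OR the absolute value of every element (ssse3, and formerly mmx), or keep a running min/max and then OR abs(min) with abs(max) (mmxext/sse2, which have pminsw/pmaxsw). The following is a minimal plain-C sketch of the two strategies for illustration only; it is not the FFmpeg reference implementation and the function names are invented.

#include <stdint.h>
#include <stdlib.h>

/* Strategy 1: OR the absolute value of every element -- what the pabsw + por
 * lanes compute before the final horizontal OR. */
int max_msb_abs_or(const int16_t *src, int len)
{
    int v = 0;
    for (int i = 0; i < len; i++)
        v |= abs(src[i]);
    return v;
}

/* Strategy 2: keep a running min and max, then OR their absolute values --
 * what pminsw/pmaxsw make cheap on mmxext/sse2. */
int max_msb_abs_minmax(const int16_t *src, int len)
{
    int mn = 0, mx = 0;
    for (int i = 0; i < len; i++) {
        if (src[i] < mn) mn = src[i];
        if (src[i] > mx) mx = src[i];
    }
    return abs(mn) | abs(mx);
}

The two functions do not return identical values, but the element with the largest magnitude is either the maximum or the most negative minimum, so both results have the same highest set bit; that is why the comment calls either method "a valid result".
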
diff --git a/libavcodec/x86/ac3dsp_mmx.c b/libavcodec/x86/ac3dsp_mmx.c
index 9578e98d8b..df5e77a612 100644
--- a/libavcodec/x86/ac3dsp_mmx.c
+++ b/libavcodec/x86/ac3dsp_mmx.c
@@ -27,7 +27,6 @@ extern void ff_ac3_exponent_min_mmx (uint8_t *exp, int num_reuse_blocks, int n
extern void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs);
extern void ff_ac3_exponent_min_sse2 (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
-extern int ff_ac3_max_msb_abs_int16_mmx (const int16_t *src, int len);
extern int ff_ac3_max_msb_abs_int16_mmxext(const int16_t *src, int len);
extern int ff_ac3_max_msb_abs_int16_sse2 (const int16_t *src, int len);
extern int ff_ac3_max_msb_abs_int16_ssse3 (const int16_t *src, int len);
@@ -55,7 +54,6 @@ av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
if (mm_flags & AV_CPU_FLAG_MMX) {
c->ac3_exponent_min = ff_ac3_exponent_min_mmx;
- c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx;
c->ac3_lshift_int16 = ff_ac3_lshift_int16_mmx;
c->ac3_rshift_int32 = ff_ac3_rshift_int32_mmx;
}
diff --git a/libavcodec/x86/fft.c b/libavcodec/x86/fft.c
index d2d157c2d3..7918fcb3fd 100644
--- a/libavcodec/x86/fft.c
+++ b/libavcodec/x86/fft.c
@@ -39,16 +39,6 @@ av_cold void ff_fft_init_mmx(FFTContext *s)
s->fft_permute = ff_fft_permute_sse;
s->fft_calc = ff_fft_calc_sse;
s->fft_permutation = FF_FFT_PERM_SWAP_LSBS;
- } else if (has_vectors & AV_CPU_FLAG_3DNOWEXT && HAVE_AMD3DNOWEXT) {
- /* 3DNowEx for K7 */
- s->imdct_calc = ff_imdct_calc_3dn2;
- s->imdct_half = ff_imdct_half_3dn2;
- s->fft_calc = ff_fft_calc_3dn2;
- } else if (has_vectors & AV_CPU_FLAG_3DNOW && HAVE_AMD3DNOW) {
- /* 3DNow! for K6-2/3 */
- s->imdct_calc = ff_imdct_calc_3dn;
- s->imdct_half = ff_imdct_half_3dn;
- s->fft_calc = ff_fft_calc_3dn;
}
#endif
}
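
Note: the fft.c hunk above is the runtime dispatcher. ff_fft_init_mmx inspects the CPU feature flags once and points the context's function pointers at the fastest compiled-in implementation; after this merge the 3DNow!/3DNow!Ext branches are gone, so K6-2/K7-class CPUs keep the default pointers. Below is a self-contained sketch of that pattern with invented names (FFTCtx, CPU_FLAG_SSE, fft_calc_c/_sse), not the real FFmpeg API.

#include <stdio.h>

typedef struct FFTCtx FFTCtx;
struct FFTCtx {
    void (*fft_calc)(FFTCtx *s, float *z);
};

/* Stand-ins for the real kernels; only the selection logic matters here. */
static void fft_calc_c(FFTCtx *s, float *z)   { (void)s; (void)z; puts("C path"); }
static void fft_calc_sse(FFTCtx *s, float *z) { (void)s; (void)z; puts("SSE path"); }

#define CPU_FLAG_SSE (1 << 0)

void fft_init_x86(FFTCtx *s, int cpu_flags)
{
    s->fft_calc = fft_calc_c;           /* portable fallback */
    if (cpu_flags & CPU_FLAG_SSE)
        s->fft_calc = fft_calc_sse;     /* best available implementation wins */
    /* The deleted branches used to check the 3DNow!/3DNow!Ext flags here;
     * without them those CPUs simply keep the fallback pointer. */
}

In the real code the C pointers are installed by the generic FFT init, and the x86 init only overrides them when a suitable extension is detected.
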
diff --git a/libavcodec/x86/fft_3dn.c b/libavcodec/x86/fft_3dn.c
deleted file mode 100644
index 6f2e2e8353..0000000000
--- a/libavcodec/x86/fft_3dn.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * FFT/MDCT transform with 3DNow! optimizations
- * Copyright (c) 2008 Loren Merritt
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#define EMULATE_3DNOWEXT
-#include "fft_3dn2.c"
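
Note: fft_3dn.c, deleted above, was a two-line "template instantiation": it defined EMULATE_3DNOWEXT and then #include'd fft_3dn2.c, so the same source was compiled twice, once producing the 3DNow!Ext functions and once producing plain 3DNow! versions with the exported names remapped by the #define block at the top of fft_3dn2.c. A small hypothetical example of the same build pattern follows (file and symbol names are invented).

/* sum_impl.c -- hypothetical shared source, compiled twice. */
#ifdef USE_FALLBACK
#   define array_sum array_sum_fallback   /* remap the exported name, like the */
#endif                                    /* ff_*_3dn2 -> ff_*_3dn #defines    */

int array_sum(const int *v, int n)
{
    int acc = 0;
    int i = 0;
#ifndef USE_FALLBACK
    /* "extended" build: process two elements per iteration */
    for (; i + 1 < n; i += 2)
        acc += v[i] + v[i + 1];
#endif
    for (; i < n; i++)                    /* fallback build: simple tail loop  */
        acc += v[i];
    return acc;
}

/* sum_fallback.c -- plays the role of fft_3dn.c: one define plus one include. */
#define USE_FALLBACK
#include "sum_impl.c"
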
diff --git a/libavcodec/x86/fft_3dn2.c b/libavcodec/x86/fft_3dn2.c
deleted file mode 100644
index 7a6cac14c4..0000000000
--- a/libavcodec/x86/fft_3dn2.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * FFT/MDCT transform with Extended 3DNow! optimizations
- * Copyright (c) 2006-2008 Zuxy MENG Jie, Loren Merritt
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/x86_cpu.h"
-#include "libavcodec/dsputil.h"
-#include "fft.h"
-
-DECLARE_ALIGNED(8, static const unsigned int, m1m1)[2] = { 1U<<31, 1U<<31 };
-
-#ifdef EMULATE_3DNOWEXT
-#define PSWAPD(s,d)\
- "movq "#s","#d"\n"\
- "psrlq $32,"#d"\n"\
- "punpckldq "#s","#d"\n"
-#define ff_fft_calc_3dn2 ff_fft_calc_3dn
-#define ff_fft_dispatch_3dn2 ff_fft_dispatch_3dn
-#define ff_fft_dispatch_interleave_3dn2 ff_fft_dispatch_interleave_3dn
-#define ff_imdct_calc_3dn2 ff_imdct_calc_3dn
-#define ff_imdct_half_3dn2 ff_imdct_half_3dn
-#else
-#define PSWAPD(s,d) "pswapd "#s","#d"\n"
-#endif
-
-void ff_fft_dispatch_3dn2(FFTComplex *z, int nbits);
-void ff_fft_dispatch_interleave_3dn2(FFTComplex *z, int nbits);
-
-void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z)
-{
- int n = 1<<s->nbits;
- int i;
- ff_fft_dispatch_interleave_3dn2(z, s->nbits);
- __asm__ volatile("femms");
- if(n <= 8)
- for(i=0; i<n; i+=2)
- FFSWAP(FFTSample, z[i].im, z[i+1].re);
-}
-
-void ff_imdct_half_3dn2(FFTContext *s, FFTSample *output, const FFTSample *input)
-{
- x86_reg j, k;
- long n = s->mdct_size;
- long n2 = n >> 1;
- long n4 = n >> 2;
- long n8 = n >> 3;
- const uint16_t *revtab = s->revtab;
- const FFTSample *tcos = s->tcos;
- const FFTSample *tsin = s->tsin;
- const FFTSample *in1, *in2;
- FFTComplex *z = (FFTComplex *)output;
-
- /* pre rotation */
- in1 = input;
- in2 = input + n2 - 1;
-#ifdef EMULATE_3DNOWEXT
- __asm__ volatile("movd %0, %%mm7" ::"r"(1U<<31));
-#endif
- for(k = 0; k < n4; k++) {
- // FIXME a single block is faster, but gcc 2.95 and 3.4.x on 32bit can't compile it
- __asm__ volatile(
- "movd %0, %%mm0 \n"
- "movd %2, %%mm1 \n"
- "punpckldq %1, %%mm0 \n"
- "punpckldq %3, %%mm1 \n"
- "movq %%mm0, %%mm2 \n"
- PSWAPD( %%mm1, %%mm3 )
- "pfmul %%mm1, %%mm0 \n"
- "pfmul %%mm3, %%mm2 \n"
-#ifdef EMULATE_3DNOWEXT
- "movq %%mm0, %%mm1 \n"
- "punpckhdq %%mm2, %%mm0 \n"
- "punpckldq %%mm2, %%mm1 \n"
- "pxor %%mm7, %%mm0 \n"
- "pfadd %%mm1, %%mm0 \n"
-#else
- "pfpnacc %%mm2, %%mm0 \n"
-#endif
- ::"m"(in2[-2*k]), "m"(in1[2*k]),
- "m"(tcos[k]), "m"(tsin[k])
- );
- __asm__ volatile(
- "movq %%mm0, %0 \n\t"
- :"=m"(z[revtab[k]])
- );
- }
-
- ff_fft_dispatch_3dn2(z, s->nbits);
-
-#define CMUL(j,mm0,mm1)\
- "movq (%2,"#j",2), %%mm6 \n"\
- "movq 8(%2,"#j",2), "#mm0"\n"\
- "movq %%mm6, "#mm1"\n"\
- "movq "#mm0",%%mm7 \n"\
- "pfmul (%3,"#j"), %%mm6 \n"\
- "pfmul (%4,"#j"), "#mm0"\n"\
- "pfmul (%4,"#j"), "#mm1"\n"\
- "pfmul (%3,"#j"), %%mm7 \n"\
- "pfsub %%mm6, "#mm0"\n"\
- "pfadd %%mm7, "#mm1"\n"
-
- /* post rotation */
- j = -n2;
- k = n2-8;
- __asm__ volatile(
- "1: \n"
- CMUL(%0, %%mm0, %%mm1)
- CMUL(%1, %%mm2, %%mm3)
- "movd %%mm0, (%2,%0,2) \n"
- "movd %%mm1,12(%2,%1,2) \n"
- "movd %%mm2, (%2,%1,2) \n"
- "movd %%mm3,12(%2,%0,2) \n"
- "psrlq $32, %%mm0 \n"
- "psrlq $32, %%mm1 \n"
- "psrlq $32, %%mm2 \n"
- "psrlq $32, %%mm3 \n"
- "movd %%mm0, 8(%2,%0,2) \n"
- "movd %%mm1, 4(%2,%1,2) \n"
- "movd %%mm2, 8(%2,%1,2) \n"
- "movd %%mm3, 4(%2,%0,2) \n"
- "sub $8, %1 \n"
- "add $8, %0 \n"
- "jl 1b \n"
- :"+r"(j), "+r"(k)
- :"r"(z+n8), "r"(tcos+n8), "r"(tsin+n8)
- :"memory"
- );
- __asm__ volatile("femms");
-}
-
-void ff_imdct_calc_3dn2(FFTContext *s, FFTSample *output, const FFTSample *input)
-{
- x86_reg j, k;
- long n = s->mdct_size;
- long n4 = n >> 2;
-
- ff_imdct_half_3dn2(s, output+n4, input);
-
- j = -n;
- k = n-8;
- __asm__ volatile(
- "movq %4, %%mm7 \n"
- "1: \n"
- PSWAPD((%2,%1), %%mm0)
- PSWAPD((%3,%0), %%mm1)
- "pxor %%mm7, %%mm0 \n"
- "movq %%mm1, (%3,%1) \n"
- "movq %%mm0, (%2,%0) \n"
- "sub $8, %1 \n"
- "add $8, %0 \n"
- "jl 1b \n"
- :"+r"(j), "+r"(k)
- :"r"(output+n4), "r"(output+n4*3),
- "m"(*m1m1)
- );
- __asm__ volatile("femms");
-}
-
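
Note: one of the 3DNow!Ext instructions the shared source has to emulate is pswapd, which swaps the two 32-bit halves of a 64-bit MMX register; when EMULATE_3DNOWEXT is defined, the PSWAPD macro above rebuilds it from movq + psrlq + punpckldq (the other extended instruction, pfpnacc, is handled by the #ifdef block inside the pre-rotation loop). Below is a purely illustrative scalar sketch of that equivalence, operating on a uint64_t instead of an MMX register.

#include <stdint.h>
#include <stdio.h>

/* What pswapd does: swap the two 32-bit halves of a 64-bit value. */
static uint64_t pswapd_native(uint64_t s)
{
    return (s >> 32) | (s << 32);
}

/* What the EMULATE_3DNOWEXT macro does with plain MMX ops:
 *   movq       s, d  -> copy s into d
 *   psrlq $32, d     -> high half of s now sits in the low dword of d
 *   punpckldq  s, d  -> keep the low dword of d, move the low dword of s
 *                       into the high dword
 * which yields the same swapped result. */
static uint64_t pswapd_emulated(uint64_t s)
{
    uint64_t d = s;                          /* movq      s, d */
    d >>= 32;                                /* psrlq $32, d   */
    d = (d & 0xffffffffu) | (s << 32);       /* punpckldq s, d */
    return d;
}

int main(void)
{
    uint64_t x = 0x1111111122222222ULL;
    printf("%d\n", pswapd_native(x) == pswapd_emulated(x)); /* prints 1 */
    return 0;
}
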