author     Diego Biurrun <diego@biurrun.de>    2012-07-08 19:28:57 +0200
committer  Diego Biurrun <diego@biurrun.de>    2012-10-31 01:04:55 +0100
commit     588fafe7f3bdce1b7265b74320e9bdfad3e25960 (patch)
tree       2f63a805c9281c8496e75270035e36ffef7ead04 /libavcodec
parent     a65bdceb060628881578afb29df4eb222421381f (diff)
download   ffmpeg-588fafe7f3bdce1b7265b74320e9bdfad3e25960.tar.gz
x86: MMX2 ---> MMXEXT in macro names
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/x86/ac3dsp.asm          2
-rw-r--r--  libavcodec/x86/cavsdsp.c           4
-rw-r--r--  libavcodec/x86/dsputil_mmx.c      14
-rw-r--r--  libavcodec/x86/dsputilenc.asm      8
-rw-r--r--  libavcodec/x86/dsputilenc_mmx.c    8
-rw-r--r--  libavcodec/x86/h264_idct.asm      40
-rw-r--r--  libavcodec/x86/h264_qpel.c        12
-rw-r--r--  libavcodec/x86/vc1dsp.asm          2
8 files changed, 45 insertions, 45 deletions
diff --git a/libavcodec/x86/ac3dsp.asm b/libavcodec/x86/ac3dsp.asm
index 724b0dc97a..0c00759c41 100644
--- a/libavcodec/x86/ac3dsp.asm
+++ b/libavcodec/x86/ac3dsp.asm
@@ -156,7 +156,7 @@ INIT_MMX mmx
%define ABS2 ABS2_MMX
AC3_MAX_MSB_ABS_INT16 or_abs
INIT_MMX mmx2
-%define ABS2 ABS2_MMX2
+%define ABS2 ABS2_MMXEXT
AC3_MAX_MSB_ABS_INT16 min_max
INIT_XMM sse2
AC3_MAX_MSB_ABS_INT16 min_max
diff --git a/libavcodec/x86/cavsdsp.c b/libavcodec/x86/cavsdsp.c
index 5350f7ea6a..b628f080e4 100644
--- a/libavcodec/x86/cavsdsp.c
+++ b/libavcodec/x86/cavsdsp.c
@@ -430,7 +430,7 @@ static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, ui
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
-#define AVG_MMX2_OP(a,b,temp, size) \
+#define AVG_MMXEXT_OP(a, b, temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
@@ -439,7 +439,7 @@ static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, ui
#if HAVE_MMXEXT_INLINE
QPEL_CAVS(put_, PUT_OP, mmx2)
-QPEL_CAVS(avg_, AVG_MMX2_OP, mmx2)
+QPEL_CAVS(avg_, AVG_MMXEXT_OP, mmx2)
CAVS_MC(put_, 8, mmx2)
CAVS_MC(put_, 16,mmx2)
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index ef843b5234..1e78c20a96 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -923,7 +923,7 @@ static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
"packuswb %%mm5, %%mm5 \n\t" \
OP(%%mm5, out, %%mm7, d)
-#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW) \
+#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMXEXT, OP_3DNOW) \
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, \
uint8_t *src, \
int dstStride, \
@@ -991,7 +991,7 @@ static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, \
"psraw $5, %%mm3 \n\t" \
"movq %5, %%mm1 \n\t" \
"packuswb %%mm3, %%mm1 \n\t" \
- OP_MMX2(%%mm1, (%1), %%mm4, q) \
+ OP_MMXEXT(%%mm1, (%1), %%mm4, q) \
/* mm0 = GHIJ, mm2 = FGHI, mm5 = HIJK, mm6 = IJKL, mm7 = 0 */ \
\
"movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */ \
@@ -1038,7 +1038,7 @@ static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, \
"paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */ \
"psraw $5, %%mm4 \n\t" \
"packuswb %%mm4, %%mm0 \n\t" \
- OP_MMX2(%%mm0, 8(%1), %%mm4, q) \
+ OP_MMXEXT(%%mm0, 8(%1), %%mm4, q) \
\
"add %3, %0 \n\t" \
"add %4, %1 \n\t" \
@@ -1175,7 +1175,7 @@ static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, \
"paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */ \
"psraw $5, %%mm3 \n\t" \
"packuswb %%mm3, %%mm0 \n\t" \
- OP_MMX2(%%mm0, (%1), %%mm4, q) \
+ OP_MMXEXT(%%mm0, (%1), %%mm4, q) \
\
"add %3, %0 \n\t" \
"add %4, %1 \n\t" \
@@ -1744,19 +1744,19 @@ static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
"pavgusb "#temp", "#a" \n\t" \
"mov"#size" "#a", "#b" \n\t"
-#define AVG_MMX2_OP(a, b, temp, size) \
+#define AVG_MMXEXT_OP(a, b, temp, size) \
"mov"#size" "#b", "#temp" \n\t" \
"pavgb "#temp", "#a" \n\t" \
"mov"#size" "#a", "#b" \n\t"
QPEL_BASE(put_, ff_pw_16, _, PUT_OP, PUT_OP)
-QPEL_BASE(avg_, ff_pw_16, _, AVG_MMX2_OP, AVG_3DNOW_OP)
+QPEL_BASE(avg_, ff_pw_16, _, AVG_MMXEXT_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_, ff_pw_16, _, PUT_OP, 3dnow)
QPEL_OP(avg_, ff_pw_16, _, AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_, ff_pw_16, _, PUT_OP, mmx2)
-QPEL_OP(avg_, ff_pw_16, _, AVG_MMX2_OP, mmx2)
+QPEL_OP(avg_, ff_pw_16, _, AVG_MMXEXT_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
/***********************************/
diff --git a/libavcodec/x86/dsputilenc.asm b/libavcodec/x86/dsputilenc.asm
index 597f894c4e..6c4fb505da 100644
--- a/libavcodec/x86/dsputilenc.asm
+++ b/libavcodec/x86/dsputilenc.asm
@@ -112,7 +112,7 @@ SECTION .text
movd %3, %1
%endmacro
-%macro HSUM_MMX2 3
+%macro HSUM_MMXEXT 3
pshufw %2, %1, 0xE
paddusw %1, %2
pshufw %2, %1, 0x1
@@ -263,12 +263,12 @@ INIT_MMX
%define HSUM HSUM_MMX
HADAMARD8_DIFF_MMX mmx
-%define ABS1 ABS1_MMX2
-%define HSUM HSUM_MMX2
+%define ABS1 ABS1_MMXEXT
+%define HSUM HSUM_MMXEXT
HADAMARD8_DIFF_MMX mmx2
INIT_XMM
-%define ABS2 ABS2_MMX2
+%define ABS2 ABS2_MMXEXT
%if ARCH_X86_64
%define ABS_SUM_8x8 ABS_SUM_8x8_64
%else
diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c
index b7d88f0f36..43940bdf81 100644
--- a/libavcodec/x86/dsputilenc_mmx.c
+++ b/libavcodec/x86/dsputilenc_mmx.c
@@ -888,7 +888,7 @@ static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *src1, c
"pxor " #z ", " #a " \n\t"\
"psubw " #z ", " #a " \n\t"
-#define MMABS_MMX2(a,z)\
+#define MMABS_MMXEXT(a, z) \
"pxor " #z ", " #z " \n\t"\
"psubw " #a ", " #z " \n\t"\
"pmaxsw " #z ", " #a " \n\t"
@@ -912,7 +912,7 @@ static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *src1, c
"paddusw "#t", "#a" \n\t"\
"movd "#a", "#dst" \n\t"\
-#define HSUM_MMX2(a, t, dst)\
+#define HSUM_MMXEXT(a, t, dst) \
"pshufw $0x0E, "#a", "#t" \n\t"\
"paddusw "#t", "#a" \n\t"\
"pshufw $0x01, "#a", "#t" \n\t"\
@@ -974,8 +974,8 @@ DCT_SAD_FUNC(mmx)
#undef MMABS
#undef HSUM
-#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
-#define MMABS(a,z) MMABS_MMX2(a,z)
+#define HSUM(a,t,dst) HSUM_MMXEXT(a,t,dst)
+#define MMABS(a,z) MMABS_MMXEXT(a,z)
DCT_SAD_FUNC(mmx2)
#undef HSUM
#undef DCT_SAD
diff --git a/libavcodec/x86/h264_idct.asm b/libavcodec/x86/h264_idct.asm
index 5d861d3cab..5e779cb465 100644
--- a/libavcodec/x86/h264_idct.asm
+++ b/libavcodec/x86/h264_idct.asm
@@ -246,7 +246,7 @@ cglobal h264_idct8_add_8_sse2, 3, 4, 10
IDCT8_ADD_SSE r0, r1, r2, r3
RET
-%macro DC_ADD_MMX2_INIT 2-3
+%macro DC_ADD_MMXEXT_INIT 2-3
%if %0 == 2
movsx %1, word [%1]
add %1, 32
@@ -266,7 +266,7 @@ cglobal h264_idct8_add_8_sse2, 3, 4, 10
packuswb m1, m1
%endmacro
-%macro DC_ADD_MMX2_OP 4
+%macro DC_ADD_MMXEXT_OP 4
%1 m2, [%2 ]
%1 m3, [%2+%3 ]
%1 m4, [%2+%3*2]
@@ -288,16 +288,16 @@ cglobal h264_idct8_add_8_sse2, 3, 4, 10
INIT_MMX
; ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_dc_add_8_mmx2, 3, 3, 0
- DC_ADD_MMX2_INIT r1, r2
- DC_ADD_MMX2_OP movh, r0, r2, r1
+ DC_ADD_MMXEXT_INIT r1, r2
+ DC_ADD_MMXEXT_OP movh, r0, r2, r1
RET
; ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8_mmx2, 3, 3, 0
- DC_ADD_MMX2_INIT r1, r2
- DC_ADD_MMX2_OP mova, r0, r2, r1
+ DC_ADD_MMXEXT_INIT r1, r2
+ DC_ADD_MMXEXT_OP mova, r0, r2, r1
lea r0, [r0+r2*4]
- DC_ADD_MMX2_OP mova, r0, r2, r1
+ DC_ADD_MMXEXT_OP mova, r0, r2, r1
RET
; ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset,
@@ -371,14 +371,14 @@ cglobal h264_idct_add16_8_mmx2, 5, 8 + npicregs, 0, dst1, block_offset, block, s
movsx r6, word [r2]
test r6, r6
jz .no_dc
- DC_ADD_MMX2_INIT r2, r3, r6
+ DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
lea dst2q, [r0+dst2q]
- DC_ADD_MMX2_OP movh, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
@@ -445,14 +445,14 @@ cglobal h264_idct_add16intra_8_mmx2, 5, 8 + npicregs, 0, dst1, block_offset, blo
movsx r6, word [r2]
test r6, r6
jz .skipblock
- DC_ADD_MMX2_INIT r2, r3, r6
+ DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
add dst2q, r0
- DC_ADD_MMX2_OP movh, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
@@ -483,16 +483,16 @@ cglobal h264_idct8_add4_8_mmx2, 5, 8 + npicregs, 0, dst1, block_offset, block, s
movsx r6, word [r2]
test r6, r6
jz .no_dc
- DC_ADD_MMX2_INIT r2, r3, r6
+ DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
lea dst2q, [r0+dst2q]
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
lea dst2q, [dst2q+r3*4]
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
@@ -541,16 +541,16 @@ cglobal h264_idct8_add4_8_sse2, 5, 8 + npicregs, 10, dst1, block_offset, block,
test r6, r6
jz .no_dc
INIT_MMX
- DC_ADD_MMX2_INIT r2, r3, r6
+ DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
add dst2q, r0
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
lea dst2q, [dst2q+r3*4]
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
@@ -644,7 +644,7 @@ h264_idct_add8_mmx2_plane:
movsx r6, word [r2]
test r6, r6
jz .skipblock
- DC_ADD_MMX2_INIT r2, r3, r6
+ DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64
mov r0d, dword [r1+r5*4]
add r0, [dst2q]
@@ -653,7 +653,7 @@ h264_idct_add8_mmx2_plane:
mov r0, [r0]
add r0, dword [r1+r5*4]
%endif
- DC_ADD_MMX2_OP movh, r0, r3, r6
+ DC_ADD_MMXEXT_OP movh, r0, r3, r6
.skipblock:
inc r5
add r2, 32
@@ -697,7 +697,7 @@ h264_idct_dc_add8_mmx2:
pshufw m1, m0, 0xFA ; -d-d-d-d-D-D-D-D
punpcklwd m0, m0 ; d d d d D D D D
lea r6, [r3*3]
- DC_ADD_MMX2_OP movq, r0, r3, r6
+ DC_ADD_MMXEXT_OP movq, r0, r3, r6
ret
ALIGN 16
diff --git a/libavcodec/x86/h264_qpel.c b/libavcodec/x86/h264_qpel.c
index 284c85a99b..5a2db781d2 100644
--- a/libavcodec/x86/h264_qpel.c
+++ b/libavcodec/x86/h264_qpel.c
@@ -1169,18 +1169,18 @@ QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_, PUT_OP, mmx2)
-QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
+QPEL_H264(avg_, AVG_MMXEXT_OP, mmx2)
QPEL_H264_V_XMM(put_, PUT_OP, sse2)
-QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2)
+QPEL_H264_V_XMM(avg_, AVG_MMXEXT_OP, sse2)
QPEL_H264_HV_XMM(put_, PUT_OP, sse2)
-QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2)
+QPEL_H264_HV_XMM(avg_, AVG_MMXEXT_OP, sse2)
#if HAVE_SSSE3_INLINE
QPEL_H264_H_XMM(put_, PUT_OP, ssse3)
-QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3)
+QPEL_H264_H_XMM(avg_, AVG_MMXEXT_OP, ssse3)
QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3)
-QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3)
+QPEL_H264_HV2_XMM(avg_, AVG_MMXEXT_OP, ssse3)
QPEL_H264_HV_XMM(put_, PUT_OP, ssse3)
-QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, ssse3)
+QPEL_H264_HV_XMM(avg_, AVG_MMXEXT_OP, ssse3)
#endif
#undef PAVGB
diff --git a/libavcodec/x86/vc1dsp.asm b/libavcodec/x86/vc1dsp.asm
index e759cf5cf0..ab15f7b753 100644
--- a/libavcodec/x86/vc1dsp.asm
+++ b/libavcodec/x86/vc1dsp.asm
@@ -268,7 +268,7 @@ cglobal vc1_h_loop_filter8_%1, 3,5,0
RET
%endmacro
-%define PABSW PABSW_MMX2
+%define PABSW PABSW_MMXEXT
VC1_LF_MMX mmx2
INIT_XMM