author    Diego Biurrun <diego@biurrun.de>  2012-03-16 18:42:01 +0100
committer Diego Biurrun <diego@biurrun.de>  2012-03-25 11:50:48 +0200
commit    62ce9defb81d0b6bd179131d1502858c8778f411 (patch)
tree      52d48bbde1837277629b04ac951edc552a7a68b8 /libavcodec
parent    3b54912113f8b3a5d8c70368b2b759be773b4b3f (diff)
download  ffmpeg-62ce9defb81d0b6bd179131d1502858c8778f411.tar.gz
x86: dsputil: prettyprint gcc inline asm
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/x86/dsputil_mmx.c  2605
1 file changed, 1310 insertions, 1295 deletions
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index bb9ad7854e..040e37b38d 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -84,81 +84,81 @@ DECLARE_ALIGNED(16, const xmm_reg, ff_pb_FE) = { 0xFEFEFEFEFEFEFEFEULL, 0xFEF
DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
-#define JUMPALIGN() __asm__ volatile (".p2align 3"::)
-#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)
+#define JUMPALIGN() __asm__ volatile (".p2align 3"::)
+#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)
-#define MOVQ_BFE(regd) \
- __asm__ volatile ( \
- "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
- "paddb %%" #regd ", %%" #regd " \n\t" ::)
+#define MOVQ_BFE(regd) \
+ __asm__ volatile ( \
+ "pcmpeqd %%"#regd", %%"#regd" \n\t" \
+ "paddb %%"#regd", %%"#regd" \n\t" ::)
#ifndef PIC
-#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
-#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
+#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
+#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
#else
// for shared libraries it is better to use this way of accessing constants
// pcmpeqd -> -1
-#define MOVQ_BONE(regd) \
- __asm__ volatile ( \
- "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
- "psrlw $15, %%" #regd " \n\t" \
- "packuswb %%" #regd ", %%" #regd " \n\t" ::)
-
-#define MOVQ_WTWO(regd) \
- __asm__ volatile ( \
- "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
- "psrlw $15, %%" #regd " \n\t" \
- "psllw $1, %%" #regd " \n\t"::)
+#define MOVQ_BONE(regd) \
+ __asm__ volatile ( \
+ "pcmpeqd %%"#regd", %%"#regd" \n\t" \
+ "psrlw $15, %%"#regd" \n\t" \
+ "packuswb %%"#regd", %%"#regd" \n\t" ::)
+
+#define MOVQ_WTWO(regd) \
+ __asm__ volatile ( \
+ "pcmpeqd %%"#regd", %%"#regd" \n\t" \
+ "psrlw $15, %%"#regd" \n\t" \
+ "psllw $1, %%"#regd" \n\t"::)
#endif
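
For reference: under PIC, loading a constant like ff_bone would need GOT-relative addressing, so the macros above synthesize the value in the register instead. pcmpeqd sets every bit, psrlw $15 leaves 0x0001 in each 16-bit lane, and packuswb (or psllw $1 for ff_wtwo) turns that into 0x01 bytes (or 0x0002 words). A minimal C sketch of the value MOVQ_BONE leaves in the register (the per-lane shift is modeled with a mask, and the helper name is made up for illustration):

#include <stdint.h>

static uint64_t movq_bone_value(void)
{
    uint64_t w = 0xFFFFFFFFFFFFFFFFULL;           /* pcmpeqd: all bits set      */
    w = (w >> 15) & 0x0001000100010001ULL;        /* psrlw $15 in each word     */
    uint64_t b = 0;
    for (int i = 0; i < 4; i++) {                 /* packuswb regd, regd:       */
        uint64_t lo = (w >> (16 * i)) & 0xFF;     /* each 0x0001 word saturates */
        b |= lo << (8 * i) | lo << (8 * i + 32);  /* to 0x01 in both halves     */
    }
    return b;                                     /* 0x0101010101010101         */
}
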
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
-#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
- "movq " #rega ", " #regr " \n\t"\
- "pand " #regb ", " #regr " \n\t"\
- "pxor " #rega ", " #regb " \n\t"\
- "pand " #regfe "," #regb " \n\t"\
- "psrlq $1, " #regb " \n\t"\
- "paddb " #regb ", " #regr " \n\t"
-
-#define PAVGB_MMX(rega, regb, regr, regfe) \
- "movq " #rega ", " #regr " \n\t"\
- "por " #regb ", " #regr " \n\t"\
- "pxor " #rega ", " #regb " \n\t"\
- "pand " #regfe "," #regb " \n\t"\
- "psrlq $1, " #regb " \n\t"\
- "psubb " #regb ", " #regr " \n\t"
+#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
+ "movq "#rega", "#regr" \n\t" \
+ "pand "#regb", "#regr" \n\t" \
+ "pxor "#rega", "#regb" \n\t" \
+ "pand "#regfe", "#regb" \n\t" \
+ "psrlq $1, "#regb" \n\t" \
+ "paddb "#regb", "#regr" \n\t"
+
+#define PAVGB_MMX(rega, regb, regr, regfe) \
+ "movq "#rega", "#regr" \n\t" \
+ "por "#regb", "#regr" \n\t" \
+ "pxor "#rega", "#regb" \n\t" \
+ "pand "#regfe", "#regb" \n\t" \
+ "psrlq $1, "#regb" \n\t" \
+ "psubb "#regb", "#regr" \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
-#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
- "movq " #rega ", " #regr " \n\t"\
- "movq " #regc ", " #regp " \n\t"\
- "pand " #regb ", " #regr " \n\t"\
- "pand " #regd ", " #regp " \n\t"\
- "pxor " #rega ", " #regb " \n\t"\
- "pxor " #regc ", " #regd " \n\t"\
- "pand %%mm6, " #regb " \n\t"\
- "pand %%mm6, " #regd " \n\t"\
- "psrlq $1, " #regb " \n\t"\
- "psrlq $1, " #regd " \n\t"\
- "paddb " #regb ", " #regr " \n\t"\
- "paddb " #regd ", " #regp " \n\t"
-
-#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
- "movq " #rega ", " #regr " \n\t"\
- "movq " #regc ", " #regp " \n\t"\
- "por " #regb ", " #regr " \n\t"\
- "por " #regd ", " #regp " \n\t"\
- "pxor " #rega ", " #regb " \n\t"\
- "pxor " #regc ", " #regd " \n\t"\
- "pand %%mm6, " #regb " \n\t"\
- "pand %%mm6, " #regd " \n\t"\
- "psrlq $1, " #regd " \n\t"\
- "psrlq $1, " #regb " \n\t"\
- "psubb " #regb ", " #regr " \n\t"\
- "psubb " #regd ", " #regp " \n\t"
+#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
+ "movq "#rega", "#regr" \n\t" \
+ "movq "#regc", "#regp" \n\t" \
+ "pand "#regb", "#regr" \n\t" \
+ "pand "#regd", "#regp" \n\t" \
+ "pxor "#rega", "#regb" \n\t" \
+ "pxor "#regc", "#regd" \n\t" \
+ "pand %%mm6, "#regb" \n\t" \
+ "pand %%mm6, "#regd" \n\t" \
+ "psrlq $1, "#regb" \n\t" \
+ "psrlq $1, "#regd" \n\t" \
+ "paddb "#regb", "#regr" \n\t" \
+ "paddb "#regd", "#regp" \n\t"
+
+#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
+ "movq "#rega", "#regr" \n\t" \
+ "movq "#regc", "#regp" \n\t" \
+ "por "#regb", "#regr" \n\t" \
+ "por "#regd", "#regp" \n\t" \
+ "pxor "#rega", "#regb" \n\t" \
+ "pxor "#regc", "#regd" \n\t" \
+ "pand %%mm6, "#regb" \n\t" \
+ "pand %%mm6, "#regd" \n\t" \
+ "psrlq $1, "#regd" \n\t" \
+ "psrlq $1, "#regb" \n\t" \
+ "psubb "#regb", "#regr" \n\t" \
+ "psubb "#regd", "#regp" \n\t"
/***********************************/
/* MMX no rounding */
@@ -244,69 +244,70 @@ void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
p = block;
pix = pixels;
/* unrolled loop */
- __asm__ volatile(
- "movq %3, %%mm0 \n\t"
- "movq 8%3, %%mm1 \n\t"
- "movq 16%3, %%mm2 \n\t"
- "movq 24%3, %%mm3 \n\t"
- "movq 32%3, %%mm4 \n\t"
- "movq 40%3, %%mm5 \n\t"
- "movq 48%3, %%mm6 \n\t"
- "movq 56%3, %%mm7 \n\t"
- "packuswb %%mm1, %%mm0 \n\t"
- "packuswb %%mm3, %%mm2 \n\t"
- "packuswb %%mm5, %%mm4 \n\t"
- "packuswb %%mm7, %%mm6 \n\t"
- "movq %%mm0, (%0) \n\t"
- "movq %%mm2, (%0, %1) \n\t"
- "movq %%mm4, (%0, %1, 2) \n\t"
- "movq %%mm6, (%0, %2) \n\t"
- ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
- :"memory");
+ __asm__ volatile (
+ "movq %3, %%mm0 \n\t"
+ "movq 8%3, %%mm1 \n\t"
+ "movq 16%3, %%mm2 \n\t"
+ "movq 24%3, %%mm3 \n\t"
+ "movq 32%3, %%mm4 \n\t"
+ "movq 40%3, %%mm5 \n\t"
+ "movq 48%3, %%mm6 \n\t"
+ "movq 56%3, %%mm7 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm2 \n\t"
+ "packuswb %%mm5, %%mm4 \n\t"
+ "packuswb %%mm7, %%mm6 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm2, (%0, %1) \n\t"
+ "movq %%mm4, (%0, %1, 2) \n\t"
+ "movq %%mm6, (%0, %2) \n\t"
+ :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
+ "m"(*p)
+ : "memory");
pix += line_size * 4;
p += 32;
// if this were an exact copy of the code above, the compiler
// would generate some very strange code,
// thus the use of "r"
- __asm__ volatile(
- "movq (%3), %%mm0 \n\t"
- "movq 8(%3), %%mm1 \n\t"
- "movq 16(%3), %%mm2 \n\t"
- "movq 24(%3), %%mm3 \n\t"
- "movq 32(%3), %%mm4 \n\t"
- "movq 40(%3), %%mm5 \n\t"
- "movq 48(%3), %%mm6 \n\t"
- "movq 56(%3), %%mm7 \n\t"
- "packuswb %%mm1, %%mm0 \n\t"
- "packuswb %%mm3, %%mm2 \n\t"
- "packuswb %%mm5, %%mm4 \n\t"
- "packuswb %%mm7, %%mm6 \n\t"
- "movq %%mm0, (%0) \n\t"
- "movq %%mm2, (%0, %1) \n\t"
- "movq %%mm4, (%0, %1, 2) \n\t"
- "movq %%mm6, (%0, %2) \n\t"
- ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
- :"memory");
-}
-
-#define put_signed_pixels_clamped_mmx_half(off) \
- "movq "#off"(%2), %%mm1 \n\t"\
- "movq 16+"#off"(%2), %%mm2 \n\t"\
- "movq 32+"#off"(%2), %%mm3 \n\t"\
- "movq 48+"#off"(%2), %%mm4 \n\t"\
- "packsswb 8+"#off"(%2), %%mm1 \n\t"\
- "packsswb 24+"#off"(%2), %%mm2 \n\t"\
- "packsswb 40+"#off"(%2), %%mm3 \n\t"\
- "packsswb 56+"#off"(%2), %%mm4 \n\t"\
- "paddb %%mm0, %%mm1 \n\t"\
- "paddb %%mm0, %%mm2 \n\t"\
- "paddb %%mm0, %%mm3 \n\t"\
- "paddb %%mm0, %%mm4 \n\t"\
- "movq %%mm1, (%0) \n\t"\
- "movq %%mm2, (%0, %3) \n\t"\
- "movq %%mm3, (%0, %3, 2) \n\t"\
- "movq %%mm4, (%0, %1) \n\t"
+ __asm__ volatile (
+ "movq (%3), %%mm0 \n\t"
+ "movq 8(%3), %%mm1 \n\t"
+ "movq 16(%3), %%mm2 \n\t"
+ "movq 24(%3), %%mm3 \n\t"
+ "movq 32(%3), %%mm4 \n\t"
+ "movq 40(%3), %%mm5 \n\t"
+ "movq 48(%3), %%mm6 \n\t"
+ "movq 56(%3), %%mm7 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm2 \n\t"
+ "packuswb %%mm5, %%mm4 \n\t"
+ "packuswb %%mm7, %%mm6 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm2, (%0, %1) \n\t"
+ "movq %%mm4, (%0, %1, 2) \n\t"
+ "movq %%mm6, (%0, %2) \n\t"
+ :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
+ : "memory");
+}
+
+#define put_signed_pixels_clamped_mmx_half(off) \
+ "movq "#off"(%2), %%mm1 \n\t" \
+ "movq 16 + "#off"(%2), %%mm2 \n\t" \
+ "movq 32 + "#off"(%2), %%mm3 \n\t" \
+ "movq 48 + "#off"(%2), %%mm4 \n\t" \
+ "packsswb 8 + "#off"(%2), %%mm1 \n\t" \
+ "packsswb 24 + "#off"(%2), %%mm2 \n\t" \
+ "packsswb 40 + "#off"(%2), %%mm3 \n\t" \
+ "packsswb 56 + "#off"(%2), %%mm4 \n\t" \
+ "paddb %%mm0, %%mm1 \n\t" \
+ "paddb %%mm0, %%mm2 \n\t" \
+ "paddb %%mm0, %%mm3 \n\t" \
+ "paddb %%mm0, %%mm4 \n\t" \
+ "movq %%mm1, (%0) \n\t" \
+ "movq %%mm2, (%0, %3) \n\t" \
+ "movq %%mm3, (%0, %3, 2) \n\t" \
+ "movq %%mm4, (%0, %1) \n\t"
void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
int line_size)
@@ -315,14 +316,14 @@ void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
x86_reg line_skip3;
__asm__ volatile (
- "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
- "lea (%3, %3, 2), %1 \n\t"
- put_signed_pixels_clamped_mmx_half(0)
- "lea (%0, %3, 4), %0 \n\t"
- put_signed_pixels_clamped_mmx_half(64)
- :"+&r" (pixels), "=&r" (line_skip3)
- :"r" (block), "r"(line_skip)
- :"memory");
+ "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
+ "lea (%3, %3, 2), %1 \n\t"
+ put_signed_pixels_clamped_mmx_half(0)
+ "lea (%0, %3, 4), %0 \n\t"
+ put_signed_pixels_clamped_mmx_half(64)
+ : "+&r"(pixels), "=&r"(line_skip3)
+ : "r"(block), "r"(line_skip)
+ : "memory");
}
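
The signed variant differs from ff_put_pixels_clamped_mmx only in the narrowing step: packsswb saturates each 16-bit coefficient to [-128, 127], and paddb with ff_pb_80 then adds 0x80 with byte wraparound, which is exactly the signed-to-unsigned bias. Per element this amounts to (a scalar sketch):

#include <stdint.h>

static uint8_t put_signed_clamped_one(int16_t v)
{
    int8_t s = v < -128 ? -128 : v > 127 ? 127 : (int8_t)v;  /* packsswb   */
    return (uint8_t)(s + 128);                               /* paddb 0x80 */
}
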
void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
@@ -338,30 +339,30 @@ void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
MOVQ_ZERO(mm7);
i = 4;
do {
- __asm__ volatile(
- "movq (%2), %%mm0 \n\t"
- "movq 8(%2), %%mm1 \n\t"
- "movq 16(%2), %%mm2 \n\t"
- "movq 24(%2), %%mm3 \n\t"
- "movq %0, %%mm4 \n\t"
- "movq %1, %%mm6 \n\t"
- "movq %%mm4, %%mm5 \n\t"
- "punpcklbw %%mm7, %%mm4 \n\t"
- "punpckhbw %%mm7, %%mm5 \n\t"
- "paddsw %%mm4, %%mm0 \n\t"
- "paddsw %%mm5, %%mm1 \n\t"
- "movq %%mm6, %%mm5 \n\t"
- "punpcklbw %%mm7, %%mm6 \n\t"
- "punpckhbw %%mm7, %%mm5 \n\t"
- "paddsw %%mm6, %%mm2 \n\t"
- "paddsw %%mm5, %%mm3 \n\t"
- "packuswb %%mm1, %%mm0 \n\t"
- "packuswb %%mm3, %%mm2 \n\t"
- "movq %%mm0, %0 \n\t"
- "movq %%mm2, %1 \n\t"
- :"+m"(*pix), "+m"(*(pix+line_size))
- :"r"(p)
- :"memory");
+ __asm__ volatile (
+ "movq (%2), %%mm0 \n\t"
+ "movq 8(%2), %%mm1 \n\t"
+ "movq 16(%2), %%mm2 \n\t"
+ "movq 24(%2), %%mm3 \n\t"
+ "movq %0, %%mm4 \n\t"
+ "movq %1, %%mm6 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddsw %%mm4, %%mm0 \n\t"
+ "paddsw %%mm5, %%mm1 \n\t"
+ "movq %%mm6, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm6 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddsw %%mm6, %%mm2 \n\t"
+ "paddsw %%mm5, %%mm3 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %0 \n\t"
+ "movq %%mm2, %1 \n\t"
+ : "+m"(*pix), "+m"(*(pix + line_size))
+ : "r"(p)
+ : "memory");
pix += line_size * 2;
p += 16;
} while (--i);
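
The loop widens each pixel row to 16-bit lanes (punpcklbw/punpckhbw against the zeroed mm7), adds the residual with signed saturation (paddsw), and narrows back with unsigned saturation (packuswb). One element of that pipeline, as a scalar sketch:

#include <stdint.h>

static uint8_t add_clamped_one(uint8_t pix, int16_t residual)
{
    int v = pix + residual;                          /* widen + paddsw */
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;   /* packuswb       */
}
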
@@ -370,175 +371,175 @@ void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels,
int line_size, int h)
{
- __asm__ volatile(
- "lea (%3, %3), %%"REG_a" \n\t"
- ".p2align 3 \n\t"
- "1: \n\t"
- "movd (%1), %%mm0 \n\t"
- "movd (%1, %3), %%mm1 \n\t"
- "movd %%mm0, (%2) \n\t"
- "movd %%mm1, (%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
- "movd (%1), %%mm0 \n\t"
- "movd (%1, %3), %%mm1 \n\t"
- "movd %%mm0, (%2) \n\t"
- "movd %%mm1, (%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
- "subl $4, %0 \n\t"
- "jnz 1b \n\t"
- : "+g"(h), "+r" (pixels), "+r" (block)
- : "r"((x86_reg)line_size)
- : "%"REG_a, "memory"
+ __asm__ volatile (
+ "lea (%3, %3), %%"REG_a" \n\t"
+ ".p2align 3 \n\t"
+ "1: \n\t"
+ "movd (%1 ), %%mm0 \n\t"
+ "movd (%1, %3), %%mm1 \n\t"
+ "movd %%mm0, (%2) \n\t"
+ "movd %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movd (%1 ), %%mm0 \n\t"
+ "movd (%1, %3), %%mm1 \n\t"
+ "movd %%mm0, (%2) \n\t"
+ "movd %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r"(pixels), "+r"(block)
+ : "r"((x86_reg)line_size)
+ : "%"REG_a, "memory"
);
}
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
int line_size, int h)
{
- __asm__ volatile(
- "lea (%3, %3), %%"REG_a" \n\t"
- ".p2align 3 \n\t"
- "1: \n\t"
- "movq (%1), %%mm0 \n\t"
- "movq (%1, %3), %%mm1 \n\t"
- "movq %%mm0, (%2) \n\t"
- "movq %%mm1, (%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
- "movq (%1), %%mm0 \n\t"
- "movq (%1, %3), %%mm1 \n\t"
- "movq %%mm0, (%2) \n\t"
- "movq %%mm1, (%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
- "subl $4, %0 \n\t"
- "jnz 1b \n\t"
- : "+g"(h), "+r" (pixels), "+r" (block)
- : "r"((x86_reg)line_size)
- : "%"REG_a, "memory"
+ __asm__ volatile (
+ "lea (%3, %3), %%"REG_a" \n\t"
+ ".p2align 3 \n\t"
+ "1: \n\t"
+ "movq (%1 ), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%1 ), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r"(pixels), "+r"(block)
+ : "r"((x86_reg)line_size)
+ : "%"REG_a, "memory"
);
}
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
int line_size, int h)
{
- __asm__ volatile(
- "lea (%3, %3), %%"REG_a" \n\t"
- ".p2align 3 \n\t"
- "1: \n\t"
- "movq (%1), %%mm0 \n\t"
- "movq 8(%1), %%mm4 \n\t"
- "movq (%1, %3), %%mm1 \n\t"
- "movq 8(%1, %3), %%mm5 \n\t"
- "movq %%mm0, (%2) \n\t"
- "movq %%mm4, 8(%2) \n\t"
- "movq %%mm1, (%2, %3) \n\t"
- "movq %%mm5, 8(%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
- "movq (%1), %%mm0 \n\t"
- "movq 8(%1), %%mm4 \n\t"
- "movq (%1, %3), %%mm1 \n\t"
- "movq 8(%1, %3), %%mm5 \n\t"
- "movq %%mm0, (%2) \n\t"
- "movq %%mm4, 8(%2) \n\t"
- "movq %%mm1, (%2, %3) \n\t"
- "movq %%mm5, 8(%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
- "subl $4, %0 \n\t"
- "jnz 1b \n\t"
- : "+g"(h), "+r" (pixels), "+r" (block)
- : "r"((x86_reg)line_size)
- : "%"REG_a, "memory"
+ __asm__ volatile (
+ "lea (%3, %3), %%"REG_a" \n\t"
+ ".p2align 3 \n\t"
+ "1: \n\t"
+ "movq (%1 ), %%mm0 \n\t"
+ "movq 8(%1 ), %%mm4 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm5 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%1 ), %%mm0 \n\t"
+ "movq 8(%1 ), %%mm4 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm5 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r"(pixels), "+r"(block)
+ : "r"((x86_reg)line_size)
+ : "%"REG_a, "memory"
);
}
static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
int line_size, int h)
{
- __asm__ volatile(
- "1: \n\t"
- "movdqu (%1), %%xmm0 \n\t"
- "movdqu (%1,%3), %%xmm1 \n\t"
- "movdqu (%1,%3,2), %%xmm2 \n\t"
- "movdqu (%1,%4), %%xmm3 \n\t"
- "lea (%1,%3,4), %1 \n\t"
- "movdqa %%xmm0, (%2) \n\t"
- "movdqa %%xmm1, (%2,%3) \n\t"
- "movdqa %%xmm2, (%2,%3,2) \n\t"
- "movdqa %%xmm3, (%2,%4) \n\t"
- "subl $4, %0 \n\t"
- "lea (%2,%3,4), %2 \n\t"
- "jnz 1b \n\t"
- : "+g"(h), "+r" (pixels), "+r" (block)
- : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
- : "memory"
+ __asm__ volatile (
+ "1: \n\t"
+ "movdqu (%1 ), %%xmm0 \n\t"
+ "movdqu (%1, %3 ), %%xmm1 \n\t"
+ "movdqu (%1, %3, 2), %%xmm2 \n\t"
+ "movdqu (%1, %4 ), %%xmm3 \n\t"
+ "lea (%1, %3, 4), %1 \n\t"
+ "movdqa %%xmm0, (%2) \n\t"
+ "movdqa %%xmm1, (%2, %3) \n\t"
+ "movdqa %%xmm2, (%2, %3, 2) \n\t"
+ "movdqa %%xmm3, (%2, %4) \n\t"
+ "subl $4, %0 \n\t"
+ "lea (%2, %3, 4), %2 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r"(pixels), "+r"(block)
+ : "r"((x86_reg)line_size), "r"((x86_reg)3L * line_size)
+ : "memory"
);
}
static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
int line_size, int h)
{
- __asm__ volatile(
- "1: \n\t"
- "movdqu (%1), %%xmm0 \n\t"
- "movdqu (%1,%3), %%xmm1 \n\t"
- "movdqu (%1,%3,2), %%xmm2 \n\t"
- "movdqu (%1,%4), %%xmm3 \n\t"
- "lea (%1,%3,4), %1 \n\t"
- "pavgb (%2), %%xmm0 \n\t"
- "pavgb (%2,%3), %%xmm1 \n\t"
- "pavgb (%2,%3,2), %%xmm2 \n\t"
- "pavgb (%2,%4), %%xmm3 \n\t"
- "movdqa %%xmm0, (%2) \n\t"
- "movdqa %%xmm1, (%2,%3) \n\t"
- "movdqa %%xmm2, (%2,%3,2) \n\t"
- "movdqa %%xmm3, (%2,%4) \n\t"
- "subl $4, %0 \n\t"
- "lea (%2,%3,4), %2 \n\t"
- "jnz 1b \n\t"
- : "+g"(h), "+r" (pixels), "+r" (block)
- : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
- : "memory"
+ __asm__ volatile (
+ "1: \n\t"
+ "movdqu (%1 ), %%xmm0 \n\t"
+ "movdqu (%1, %3 ), %%xmm1 \n\t"
+ "movdqu (%1, %3, 2), %%xmm2 \n\t"
+ "movdqu (%1, %4 ), %%xmm3 \n\t"
+ "lea (%1, %3, 4), %1 \n\t"
+ "pavgb (%2 ), %%xmm0 \n\t"
+ "pavgb (%2, %3 ), %%xmm1 \n\t"
+ "pavgb (%2, %3, 2), %%xmm2 \n\t"
+ "pavgb (%2, %4), %%xmm3 \n\t"
+ "movdqa %%xmm0, (%2) \n\t"
+ "movdqa %%xmm1, (%2, %3) \n\t"
+ "movdqa %%xmm2, (%2, %3, 2) \n\t"
+ "movdqa %%xmm3, (%2, %4) \n\t"
+ "subl $4, %0 \n\t"
+ "lea (%2, %3, 4), %2 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r"(pixels), "+r"(block)
+ : "r"((x86_reg)line_size), "r"((x86_reg)3L * line_size)
+ : "memory"
);
}
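
avg_pixels16_sse2 is put_pixels16_sse2 with an extra pavgb against the bytes already in block, i.e. a per-byte rounding average:

#include <stdint.h>

static uint8_t pavgb_one(uint8_t a, uint8_t b)
{
    return (uint8_t)((a + b + 1) >> 1);   /* pavgb semantics per byte */
}
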
-#define CLEAR_BLOCKS(name,n) \
-static void name(DCTELEM *blocks)\
-{\
- __asm__ volatile(\
- "pxor %%mm7, %%mm7 \n\t"\
- "mov %1, %%"REG_a" \n\t"\
- "1: \n\t"\
- "movq %%mm7, (%0, %%"REG_a") \n\t"\
- "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
- "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
- "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
- "add $32, %%"REG_a" \n\t"\
- " js 1b \n\t"\
- : : "r" (((uint8_t *)blocks)+128*n),\
- "i" (-128*n)\
- : "%"REG_a\
- );\
+#define CLEAR_BLOCKS(name, n) \
+static void name(DCTELEM *blocks) \
+{ \
+ __asm__ volatile ( \
+ "pxor %%mm7, %%mm7 \n\t" \
+ "mov %1, %%"REG_a" \n\t" \
+ "1: \n\t" \
+ "movq %%mm7, (%0, %%"REG_a") \n\t" \
+ "movq %%mm7, 8(%0, %%"REG_a") \n\t" \
+ "movq %%mm7, 16(%0, %%"REG_a") \n\t" \
+ "movq %%mm7, 24(%0, %%"REG_a") \n\t" \
+ "add $32, %%"REG_a" \n\t" \
+ "js 1b \n\t" \
+ :: "r"(((uint8_t *)blocks) + 128 * n), \
+ "i"(-128 * n) \
+ : "%"REG_a \
+ ); \
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
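
CLEAR_BLOCKS points %0 past the end of the 128*n-byte area and runs the index from -128*n up to 0, so a single add/js pair both advances the pointer and terminates the loop off the sign flag. The shape of the loop, as a scalar sketch:

#include <stdint.h>
#include <stddef.h>

static void clear_blocks_ref(int16_t *blocks, int n)
{
    uint8_t *end = (uint8_t *)blocks + 128 * n;   /* %0: base past the end   */
    for (ptrdiff_t i = -128 * (ptrdiff_t)n; i < 0; i += 32)
        for (int j = 0; j < 32; j++)              /* four movq of zeroed mm7 */
            end[i + j] = 0;
}
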
static void clear_block_sse(DCTELEM *block)
{
- __asm__ volatile(
- "xorps %%xmm0, %%xmm0 \n"
- "movaps %%xmm0, (%0) \n"
- "movaps %%xmm0, 16(%0) \n"
- "movaps %%xmm0, 32(%0) \n"
- "movaps %%xmm0, 48(%0) \n"
- "movaps %%xmm0, 64(%0) \n"
- "movaps %%xmm0, 80(%0) \n"
- "movaps %%xmm0, 96(%0) \n"
- "movaps %%xmm0, 112(%0) \n"
+ __asm__ volatile (
+ "xorps %%xmm0, %%xmm0 \n"
+ "movaps %%xmm0, (%0) \n"
+ "movaps %%xmm0, 16(%0) \n"
+ "movaps %%xmm0, 32(%0) \n"
+ "movaps %%xmm0, 48(%0) \n"
+ "movaps %%xmm0, 64(%0) \n"
+ "movaps %%xmm0, 80(%0) \n"
+ "movaps %%xmm0, 96(%0) \n"
+ "movaps %%xmm0, 112(%0) \n"
:: "r"(block)
: "memory"
);
@@ -546,22 +547,22 @@ static void clear_block_sse(DCTELEM *block)
static void clear_blocks_sse(DCTELEM *blocks)
{
- __asm__ volatile(
- "xorps %%xmm0, %%xmm0 \n"
- "mov %1, %%"REG_a" \n"
- "1: \n"
- "movaps %%xmm0, (%0, %%"REG_a") \n"
- "movaps %%xmm0, 16(%0, %%"REG_a") \n"
- "movaps %%xmm0, 32(%0, %%"REG_a") \n"
- "movaps %%xmm0, 48(%0, %%"REG_a") \n"
- "movaps %%xmm0, 64(%0, %%"REG_a") \n"
- "movaps %%xmm0, 80(%0, %%"REG_a") \n"
- "movaps %%xmm0, 96(%0, %%"REG_a") \n"
- "movaps %%xmm0, 112(%0, %%"REG_a") \n"
- "add $128, %%"REG_a" \n"
- " js 1b \n"
- : : "r" (((uint8_t *)blocks)+128*6),
- "i" (-128*6)
+ __asm__ volatile (
+ "xorps %%xmm0, %%xmm0 \n"
+ "mov %1, %%"REG_a" \n"
+ "1: \n"
+ "movaps %%xmm0, (%0, %%"REG_a") \n"
+ "movaps %%xmm0, 16(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 32(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 48(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 64(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 80(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 96(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 112(%0, %%"REG_a") \n"
+ "add $128, %%"REG_a" \n"
+ "js 1b \n"
+ :: "r"(((uint8_t *)blocks) + 128 * 6),
+ "i"(-128 * 6)
: "%"REG_a
);
}
@@ -569,23 +570,23 @@ static void clear_blocks_sse(DCTELEM *blocks)
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
x86_reg i = 0;
- __asm__ volatile(
- "jmp 2f \n\t"
+ __asm__ volatile (
+ "jmp 2f \n\t"
"1: \n\t"
- "movq (%1, %0), %%mm0 \n\t"
- "movq (%2, %0), %%mm1 \n\t"
- "paddb %%mm0, %%mm1 \n\t"
- "movq %%mm1, (%2, %0) \n\t"
- "movq 8(%1, %0), %%mm0 \n\t"
- "movq 8(%2, %0), %%mm1 \n\t"
- "paddb %%mm0, %%mm1 \n\t"
- "movq %%mm1, 8(%2, %0) \n\t"
- "add $16, %0 \n\t"
+ "movq (%1, %0), %%mm0 \n\t"
+ "movq (%2, %0), %%mm1 \n\t"
+ "paddb %%mm0, %%mm1 \n\t"
+ "movq %%mm1, (%2, %0) \n\t"
+ "movq 8(%1, %0), %%mm0 \n\t"
+ "movq 8(%2, %0), %%mm1 \n\t"
+ "paddb %%mm0, %%mm1 \n\t"
+ "movq %%mm1, 8(%2, %0) \n\t"
+ "add $16, %0 \n\t"
"2: \n\t"
- "cmp %3, %0 \n\t"
- " js 1b \n\t"
- : "+r" (i)
- : "r"(src), "r"(dst), "r"((x86_reg)w-15)
+ "cmp %3, %0 \n\t"
+ "js 1b \n\t"
+ : "+r"(i)
+ : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
);
for ( ; i < w; i++)
dst[i + 0] += src[i + 0];
@@ -601,124 +602,123 @@ static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
int l = *left & 0xff;
int tl = *left_top & 0xff;
int t;
- __asm__ volatile(
- "mov %7, %3 \n"
- "1: \n"
- "movzbl (%3,%4), %2 \n"
- "mov %2, %k3 \n"
- "sub %b1, %b3 \n"
- "add %b0, %b3 \n"
- "mov %2, %1 \n"
- "cmp %0, %2 \n"
- "cmovg %0, %2 \n"
- "cmovg %1, %0 \n"
- "cmp %k3, %0 \n"
- "cmovg %k3, %0 \n"
- "mov %7, %3 \n"
- "cmp %2, %0 \n"
- "cmovl %2, %0 \n"
- "add (%6,%4), %b0 \n"
- "mov %b0, (%5,%4) \n"
- "inc %4 \n"
- "jl 1b \n"
- :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
- :"r"(dst+w), "r"(diff+w), "rm"(top+w)
+ __asm__ volatile (
+ "mov %7, %3 \n"
+ "1: \n"
+ "movzbl (%3, %4), %2 \n"
+ "mov %2, %k3 \n"
+ "sub %b1, %b3 \n"
+ "add %b0, %b3 \n"
+ "mov %2, %1 \n"
+ "cmp %0, %2 \n"
+ "cmovg %0, %2 \n"
+ "cmovg %1, %0 \n"
+ "cmp %k3, %0 \n"
+ "cmovg %k3, %0 \n"
+ "mov %7, %3 \n"
+ "cmp %2, %0 \n"
+ "cmovl %2, %0 \n"
+ "add (%6, %4), %b0 \n"
+ "mov %b0, (%5, %4) \n"
+ "inc %4 \n"
+ "jl 1b \n"
+ : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
+ : "r"(dst + w), "r"(diff + w), "rm"(top + w)
);
*left = l;
*left_top = tl;
}
#endif
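
add_hfyu_median_prediction reconstructs a row of HuffYUV-style median prediction: each output byte is the stored difference plus the median of the left neighbour, the top neighbour, and left + top - topleft; the cmov chain above computes that median branch-free. A scalar sketch of the same loop (names are illustrative, not the file's):

#include <stdint.h>

static int mid3(int a, int b, int c)              /* median of three */
{
    if (a > b) { int t = a; a = b; b = t; }
    return b < c ? b : (a > c ? a : c);
}

static void add_median_pred_ref(uint8_t *dst, const uint8_t *top,
                                const uint8_t *diff, int w,
                                int *left, int *left_top)
{
    int l = *left, tl = *left_top;
    for (int i = 0; i < w; i++) {
        int t    = top[i];
        int pred = mid3(l, t, l + t - tl);
        l  = dst[i] = (uint8_t)(diff[i] + pred);  /* running left value */
        tl = t;
    }
    *left     = l;
    *left_top = tl;
}
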
-#define H263_LOOP_FILTER \
- "pxor %%mm7, %%mm7 \n\t"\
- "movq %0, %%mm0 \n\t"\
- "movq %0, %%mm1 \n\t"\
- "movq %3, %%mm2 \n\t"\
- "movq %3, %%mm3 \n\t"\
- "punpcklbw %%mm7, %%mm0 \n\t"\
- "punpckhbw %%mm7, %%mm1 \n\t"\
- "punpcklbw %%mm7, %%mm2 \n\t"\
- "punpckhbw %%mm7, %%mm3 \n\t"\
- "psubw %%mm2, %%mm0 \n\t"\
- "psubw %%mm3, %%mm1 \n\t"\
- "movq %1, %%mm2 \n\t"\
- "movq %1, %%mm3 \n\t"\
- "movq %2, %%mm4 \n\t"\
- "movq %2, %%mm5 \n\t"\
- "punpcklbw %%mm7, %%mm2 \n\t"\
- "punpckhbw %%mm7, %%mm3 \n\t"\
- "punpcklbw %%mm7, %%mm4 \n\t"\
- "punpckhbw %%mm7, %%mm5 \n\t"\
- "psubw %%mm2, %%mm4 \n\t"\
- "psubw %%mm3, %%mm5 \n\t"\
- "psllw $2, %%mm4 \n\t"\
- "psllw $2, %%mm5 \n\t"\
- "paddw %%mm0, %%mm4 \n\t"\
- "paddw %%mm1, %%mm5 \n\t"\
- "pxor %%mm6, %%mm6 \n\t"\
- "pcmpgtw %%mm4, %%mm6 \n\t"\
- "pcmpgtw %%mm5, %%mm7 \n\t"\
- "pxor %%mm6, %%mm4 \n\t"\
- "pxor %%mm7, %%mm5 \n\t"\
- "psubw %%mm6, %%mm4 \n\t"\
- "psubw %%mm7, %%mm5 \n\t"\
- "psrlw $3, %%mm4 \n\t"\
- "psrlw $3, %%mm5 \n\t"\
- "packuswb %%mm5, %%mm4 \n\t"\
- "packsswb %%mm7, %%mm6 \n\t"\
- "pxor %%mm7, %%mm7 \n\t"\
- "movd %4, %%mm2 \n\t"\
- "punpcklbw %%mm2, %%mm2 \n\t"\
- "punpcklbw %%mm2, %%mm2 \n\t"\
- "punpcklbw %%mm2, %%mm2 \n\t"\
- "psubusb %%mm4, %%mm2 \n\t"\
- "movq %%mm2, %%mm3 \n\t"\
- "psubusb %%mm4, %%mm3 \n\t"\
- "psubb %%mm3, %%mm2 \n\t"\
- "movq %1, %%mm3 \n\t"\
- "movq %2, %%mm4 \n\t"\
- "pxor %%mm6, %%mm3 \n\t"\
- "pxor %%mm6, %%mm4 \n\t"\
- "paddusb %%mm2, %%mm3 \n\t"\
- "psubusb %%mm2, %%mm4 \n\t"\
- "pxor %%mm6, %%mm3 \n\t"\
- "pxor %%mm6, %%mm4 \n\t"\
- "paddusb %%mm2, %%mm2 \n\t"\
- "packsswb %%mm1, %%mm0 \n\t"\
- "pcmpgtb %%mm0, %%mm7 \n\t"\
- "pxor %%mm7, %%mm0 \n\t"\
- "psubb %%mm7, %%mm0 \n\t"\
- "movq %%mm0, %%mm1 \n\t"\
- "psubusb %%mm2, %%mm0 \n\t"\
- "psubb %%mm0, %%mm1 \n\t"\
- "pand %5, %%mm1 \n\t"\
- "psrlw $2, %%mm1 \n\t"\
- "pxor %%mm7, %%mm1 \n\t"\
- "psubb %%mm7, %%mm1 \n\t"\
- "movq %0, %%mm5 \n\t"\
- "movq %3, %%mm6 \n\t"\
- "psubb %%mm1, %%mm5 \n\t"\
- "paddb %%mm1, %%mm6 \n\t"
+#define H263_LOOP_FILTER \
+ "pxor %%mm7, %%mm7 \n\t" \
+ "movq %0, %%mm0 \n\t" \
+ "movq %0, %%mm1 \n\t" \
+ "movq %3, %%mm2 \n\t" \
+ "movq %3, %%mm3 \n\t" \
+ "punpcklbw %%mm7, %%mm0 \n\t" \
+ "punpckhbw %%mm7, %%mm1 \n\t" \
+ "punpcklbw %%mm7, %%mm2 \n\t" \
+ "punpckhbw %%mm7, %%mm3 \n\t" \
+ "psubw %%mm2, %%mm0 \n\t" \
+ "psubw %%mm3, %%mm1 \n\t" \
+ "movq %1, %%mm2 \n\t" \
+ "movq %1, %%mm3 \n\t" \
+ "movq %2, %%mm4 \n\t" \
+ "movq %2, %%mm5 \n\t" \
+ "punpcklbw %%mm7, %%mm2 \n\t" \
+ "punpckhbw %%mm7, %%mm3 \n\t" \
+ "punpcklbw %%mm7, %%mm4 \n\t" \
+ "punpckhbw %%mm7, %%mm5 \n\t" \
+ "psubw %%mm2, %%mm4 \n\t" \
+ "psubw %%mm3, %%mm5 \n\t" \
+ "psllw $2, %%mm4 \n\t" \
+ "psllw $2, %%mm5 \n\t" \
+ "paddw %%mm0, %%mm4 \n\t" \
+ "paddw %%mm1, %%mm5 \n\t" \
+ "pxor %%mm6, %%mm6 \n\t" \
+ "pcmpgtw %%mm4, %%mm6 \n\t" \
+ "pcmpgtw %%mm5, %%mm7 \n\t" \
+ "pxor %%mm6, %%mm4 \n\t" \
+ "pxor %%mm7, %%mm5 \n\t" \
+ "psubw %%mm6, %%mm4 \n\t" \
+ "psubw %%mm7, %%mm5 \n\t" \
+ "psrlw $3, %%mm4 \n\t" \
+ "psrlw $3, %%mm5 \n\t" \
+ "packuswb %%mm5, %%mm4 \n\t" \
+ "packsswb %%mm7, %%mm6 \n\t" \
+ "pxor %%mm7, %%mm7 \n\t" \
+ "movd %4, %%mm2 \n\t" \
+ "punpcklbw %%mm2, %%mm2 \n\t" \
+ "punpcklbw %%mm2, %%mm2 \n\t" \
+ "punpcklbw %%mm2, %%mm2 \n\t" \
+ "psubusb %%mm4, %%mm2 \n\t" \
+ "movq %%mm2, %%mm3 \n\t" \
+ "psubusb %%mm4, %%mm3 \n\t" \
+ "psubb %%mm3, %%mm2 \n\t" \
+ "movq %1, %%mm3 \n\t" \
+ "movq %2, %%mm4 \n\t" \
+ "pxor %%mm6, %%mm3 \n\t" \
+ "pxor %%mm6, %%mm4 \n\t" \
+ "paddusb %%mm2, %%mm3 \n\t" \
+ "psubusb %%mm2, %%mm4 \n\t" \
+ "pxor %%mm6, %%mm3 \n\t" \
+ "pxor %%mm6, %%mm4 \n\t" \
+ "paddusb %%mm2, %%mm2 \n\t" \
+ "packsswb %%mm1, %%mm0 \n\t" \
+ "pcmpgtb %%mm0, %%mm7 \n\t" \
+ "pxor %%mm7, %%mm0 \n\t" \
+ "psubb %%mm7, %%mm0 \n\t" \
+ "movq %%mm0, %%mm1 \n\t" \
+ "psubusb %%mm2, %%mm0 \n\t" \
+ "psubb %%mm0, %%mm1 \n\t" \
+ "pand %5, %%mm1 \n\t" \
+ "psrlw $2, %%mm1 \n\t" \
+ "pxor %%mm7, %%mm1 \n\t" \
+ "psubb %%mm7, %%mm1 \n\t" \
+ "movq %0, %%mm5 \n\t" \
+ "movq %3, %%mm6 \n\t" \
+ "psubb %%mm1, %%mm5 \n\t" \
+ "paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale)
{
if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
const int strength = ff_h263_loop_filter_strength[qscale];
- __asm__ volatile(
-
- H263_LOOP_FILTER
-
- "movq %%mm3, %1 \n\t"
- "movq %%mm4, %2 \n\t"
- "movq %%mm5, %0 \n\t"
- "movq %%mm6, %3 \n\t"
- : "+m" (*(uint64_t*)(src - 2*stride)),
- "+m" (*(uint64_t*)(src - 1*stride)),
- "+m" (*(uint64_t*)(src + 0*stride)),
- "+m" (*(uint64_t*)(src + 1*stride))
- : "g" (2*strength), "m"(ff_pb_FC)
- );
+ __asm__ volatile (
+ H263_LOOP_FILTER
+
+ "movq %%mm3, %1 \n\t"
+ "movq %%mm4, %2 \n\t"
+ "movq %%mm5, %0 \n\t"
+ "movq %%mm6, %3 \n\t"
+ : "+m"(*(uint64_t*)(src - 2 * stride)),
+ "+m"(*(uint64_t*)(src - 1 * stride)),
+ "+m"(*(uint64_t*)(src + 0 * stride)),
+ "+m"(*(uint64_t*)(src + 1 * stride))
+ : "g"(2 * strength), "m"(ff_pb_FC)
+ );
}
}
@@ -733,46 +733,46 @@ static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale)
transpose4x4(btemp, src, 8, stride);
transpose4x4(btemp + 4, src + 4 * stride, 8, stride);
- __asm__ volatile(
- H263_LOOP_FILTER // 5 3 4 6
-
- : "+m" (temp[0]),
- "+m" (temp[1]),
- "+m" (temp[2]),
- "+m" (temp[3])
- : "g" (2*strength), "m"(ff_pb_FC)
- );
+ __asm__ volatile (
+ H263_LOOP_FILTER // 5 3 4 6
+
+ : "+m"(temp[0]),
+ "+m"(temp[1]),
+ "+m"(temp[2]),
+ "+m"(temp[3])
+ : "g"(2 * strength), "m"(ff_pb_FC)
+ );
- __asm__ volatile(
- "movq %%mm5, %%mm1 \n\t"
- "movq %%mm4, %%mm0 \n\t"
- "punpcklbw %%mm3, %%mm5 \n\t"
- "punpcklbw %%mm6, %%mm4 \n\t"
- "punpckhbw %%mm3, %%mm1 \n\t"
- "punpckhbw %%mm6, %%mm0 \n\t"
- "movq %%mm5, %%mm3 \n\t"
- "movq %%mm1, %%mm6 \n\t"
- "punpcklwd %%mm4, %%mm5 \n\t"
- "punpcklwd %%mm0, %%mm1 \n\t"
- "punpckhwd %%mm4, %%mm3 \n\t"
- "punpckhwd %%mm0, %%mm6 \n\t"
- "movd %%mm5, (%0) \n\t"
- "punpckhdq %%mm5, %%mm5 \n\t"
- "movd %%mm5, (%0,%2) \n\t"
- "movd %%mm3, (%0,%2,2) \n\t"
- "punpckhdq %%mm3, %%mm3 \n\t"
- "movd %%mm3, (%0,%3) \n\t"
- "movd %%mm1, (%1) \n\t"
- "punpckhdq %%mm1, %%mm1 \n\t"
- "movd %%mm1, (%1,%2) \n\t"
- "movd %%mm6, (%1,%2,2) \n\t"
- "punpckhdq %%mm6, %%mm6 \n\t"
- "movd %%mm6, (%1,%3) \n\t"
- :: "r" (src),
- "r" (src + 4*stride),
- "r" ((x86_reg) stride ),
- "r" ((x86_reg)(3*stride))
- );
+ __asm__ volatile (
+ "movq %%mm5, %%mm1 \n\t"
+ "movq %%mm4, %%mm0 \n\t"
+ "punpcklbw %%mm3, %%mm5 \n\t"
+ "punpcklbw %%mm6, %%mm4 \n\t"
+ "punpckhbw %%mm3, %%mm1 \n\t"
+ "punpckhbw %%mm6, %%mm0 \n\t"
+ "movq %%mm5, %%mm3 \n\t"
+ "movq %%mm1, %%mm6 \n\t"
+ "punpcklwd %%mm4, %%mm5 \n\t"
+ "punpcklwd %%mm0, %%mm1 \n\t"
+ "punpckhwd %%mm4, %%mm3 \n\t"
+ "punpckhwd %%mm0, %%mm6 \n\t"
+ "movd %%mm5, (%0) \n\t"
+ "punpckhdq %%mm5, %%mm5 \n\t"
+ "movd %%mm5, (%0, %2) \n\t"
+ "movd %%mm3, (%0, %2, 2) \n\t"
+ "punpckhdq %%mm3, %%mm3 \n\t"
+ "movd %%mm3, (%0, %3) \n\t"
+ "movd %%mm1, (%1) \n\t"
+ "punpckhdq %%mm1, %%mm1 \n\t"
+ "movd %%mm1, (%1, %2) \n\t"
+ "movd %%mm6, (%1, %2, 2) \n\t"
+ "punpckhdq %%mm6, %%mm6 \n\t"
+ "movd %%mm6, (%1, %3) \n\t"
+ :: "r"(src),
+ "r"(src + 4 * stride),
+ "r"((x86_reg)stride),
+ "r"((x86_reg)(3 * stride))
+ );
}
}
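
The horizontal filter reuses the vertical kernel by transposing a 4x8 tile into temp, filtering it, and transposing the four result registers back with the punpck sequence above. A scalar sketch of the 4x4 byte transpose it builds on (the argument order of the transpose4x4 helper is assumed from the call sites):

#include <stdint.h>

static void transpose4x4_ref(uint8_t *dst, const uint8_t *src,
                             int dst_stride, int src_stride)
{
    for (int y = 0; y < 4; y++)
        for (int x = 0; x < 4; x++)
            dst[x * dst_stride + y] = src[y * src_stride + x];
}
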
@@ -788,411 +788,418 @@ static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
/* left and right */
ptr = buf;
if (w == 8) {
- __asm__ volatile(
- "1: \n\t"
- "movd (%0), %%mm0 \n\t"
- "punpcklbw %%mm0, %%mm0 \n\t"
- "punpcklwd %%mm0, %%mm0 \n\t"
- "punpckldq %%mm0, %%mm0 \n\t"
- "movq %%mm0, -8(%0) \n\t"
- "movq -8(%0, %2), %%mm1 \n\t"
- "punpckhbw %%mm1, %%mm1 \n\t"
- "punpckhwd %%mm1, %%mm1 \n\t"
- "punpckhdq %%mm1, %%mm1 \n\t"
- "movq %%mm1, (%0, %2) \n\t"
- "add %1, %0 \n\t"
- "cmp %3, %0 \n\t"
- " jb 1b \n\t"
- : "+r" (ptr)
- : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
- );
+ __asm__ volatile (
+ "1: \n\t"
+ "movd (%0), %%mm0 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpcklwd %%mm0, %%mm0 \n\t"
+ "punpckldq %%mm0, %%mm0 \n\t"
+ "movq %%mm0, -8(%0) \n\t"
+ "movq -8(%0, %2), %%mm1 \n\t"
+ "punpckhbw %%mm1, %%mm1 \n\t"
+ "punpckhwd %%mm1, %%mm1 \n\t"
+ "punpckhdq %%mm1, %%mm1 \n\t"
+ "movq %%mm1, (%0, %2) \n\t"
+ "add %1, %0 \n\t"
+ "cmp %3, %0 \n\t"
+ "jb 1b \n\t"
+ : "+r"(ptr)
+ : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
+ );
} else {
- __asm__ volatile(
- "1: \n\t"
- "movd (%0), %%mm0 \n\t"
- "punpcklbw %%mm0, %%mm0 \n\t"
- "punpcklwd %%mm0, %%mm0 \n\t"
- "punpckldq %%mm0, %%mm0 \n\t"
- "movq %%mm0, -8(%0) \n\t"
- "movq %%mm0, -16(%0) \n\t"
- "movq -8(%0, %2), %%mm1 \n\t"
- "punpckhbw %%mm1, %%mm1 \n\t"
- "punpckhwd %%mm1, %%mm1 \n\t"
- "punpckhdq %%mm1, %%mm1 \n\t"
- "movq %%mm1, (%0, %2) \n\t"
- "movq %%mm1, 8(%0, %2) \n\t"
- "add %1, %0 \n\t"
- "cmp %3, %0 \n\t"
- " jb 1b \n\t"
- : "+r" (ptr)
- : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
- );
+ __asm__ volatile (
+ "1: \n\t"
+ "movd (%0), %%mm0 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpcklwd %%mm0, %%mm0 \n\t"
+ "punpckldq %%mm0, %%mm0 \n\t"
+ "movq %%mm0, -8(%0) \n\t"
+ "movq %%mm0, -16(%0) \n\t"
+ "movq -8(%0, %2), %%mm1 \n\t"
+ "punpckhbw %%mm1, %%mm1 \n\t"
+ "punpckhwd %%mm1, %%mm1 \n\t"
+ "punpckhdq %%mm1, %%mm1 \n\t"
+ "movq %%mm1, (%0, %2) \n\t"
+ "movq %%mm1, 8(%0, %2) \n\t"
+ "add %1, %0 \n\t"
+ "cmp %3, %0 \n\t"
+ "jb 1b \n\t"
+ : "+r"(ptr)
+ : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
+ );
}
/* top and bottom (and hopefully also the corners) */
if (sides & EDGE_TOP) {
for (i = 0; i < h; i += 4) {
ptr = buf - (i + 1) * wrap - w;
- __asm__ volatile(
- "1: \n\t"
- "movq (%1, %0), %%mm0 \n\t"
- "movq %%mm0, (%0) \n\t"
- "movq %%mm0, (%0, %2) \n\t"
- "movq %%mm0, (%0, %2, 2) \n\t"
- "movq %%mm0, (%0, %3) \n\t"
- "add $8, %0 \n\t"
- "cmp %4, %0 \n\t"
- " jb 1b \n\t"
- : "+r" (ptr)
- : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
- );
+ __asm__ volatile (
+ "1: \n\t"
+ "movq (%1, %0), %%mm0 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm0, (%0, %2) \n\t"
+ "movq %%mm0, (%0, %2, 2) \n\t"
+ "movq %%mm0, (%0, %3) \n\t"
+ "add $8, %0 \n\t"
+ "cmp %4, %0 \n\t"
+ "jb 1b \n\t"
+ : "+r"(ptr)
+ : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
+ "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
+ );
}
}
if (sides & EDGE_BOTTOM) {
for (i = 0; i < h; i += 4) {
ptr = last_line + (i + 1) * wrap - w;
- __asm__ volatile(
- "1: \n\t"
- "movq (%1, %0), %%mm0 \n\t"
- "movq %%mm0, (%0) \n\t"
- "movq %%mm0, (%0, %2) \n\t"
- "movq %%mm0, (%0, %2, 2) \n\t"
- "movq %%mm0, (%0, %3) \n\t"
- "add $8, %0 \n\t"
- "cmp %4, %0 \n\t"
- " jb 1b \n\t"
- : "+r" (ptr)
- : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
- );
+ __asm__ volatile (
+ "1: \n\t"
+ "movq (%1, %0), %%mm0 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm0, (%0, %2) \n\t"
+ "movq %%mm0, (%0, %2, 2) \n\t"
+ "movq %%mm0, (%0, %3) \n\t"
+ "add $8, %0 \n\t"
+ "cmp %4, %0 \n\t"
+ "jb 1b \n\t"
+ : "+r"(ptr)
+ : "r"((x86_reg)last_line - (x86_reg)ptr - w),
+ "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
+ "r"(ptr + width + 2 * w)
+ );
}
}
}
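
The left/right pass splats each row's first byte across the w-byte left margin (the punpcklbw/punpcklwd/punpckldq chain) and the last byte across the right margin; the top/bottom passes then copy the first and last padded rows outward. The left/right step is, in scalar terms:

#include <stdint.h>
#include <string.h>

static void draw_lr_edges_ref(uint8_t *buf, int wrap, int width,
                              int height, int w)
{
    for (int y = 0; y < height; y++) {
        uint8_t *row = buf + y * wrap;
        memset(row - w,     row[0],         w);   /* splat first byte */
        memset(row + width, row[width - 1], w);   /* splat last byte  */
    }
}
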
-#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
- "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
- "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
- "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
- "movq "#in7", " #m3 " \n\t" /* d */\
- "movq "#in0", %%mm5 \n\t" /* D */\
- "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
- "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
- "movq "#in1", %%mm5 \n\t" /* C */\
- "movq "#in2", %%mm6 \n\t" /* B */\
- "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
- "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
- "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
- "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
- "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
- "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
- "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
- "psraw $5, %%mm5 \n\t"\
- "packuswb %%mm5, %%mm5 \n\t"\
- OP(%%mm5, out, %%mm7, d)
-
-#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW) \
-static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, \
- uint8_t *src, \
- int dstStride, \
- int srcStride, \
- int h) \
-{ \
- uint64_t temp; \
- \
- __asm__ volatile(\
- "pxor %%mm7, %%mm7 \n\t"\
- "1: \n\t"\
- "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
- "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
- "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
- "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
- "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
- "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
- "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
- "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
- "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
- "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
- "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
- "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
- "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
- "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
- "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
- "paddw %%mm3, %%mm5 \n\t" /* b */\
- "paddw %%mm2, %%mm6 \n\t" /* c */\
- "paddw %%mm5, %%mm5 \n\t" /* 2b */\
- "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
- "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
- "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
- "paddw %%mm4, %%mm0 \n\t" /* a */\
- "paddw %%mm1, %%mm5 \n\t" /* d */\
- "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
- "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
- "paddw %6, %%mm6 \n\t"\
- "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
- "psraw $5, %%mm0 \n\t"\
- "movq %%mm0, %5 \n\t"\
- /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
- \
- "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
- "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
- "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
- "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
- "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
- "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
- "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
- "paddw %%mm0, %%mm2 \n\t" /* b */\
- "paddw %%mm5, %%mm3 \n\t" /* c */\
- "paddw %%mm2, %%mm2 \n\t" /* 2b */\
- "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
- "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
- "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
- "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
- "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
- "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
- "paddw %%mm2, %%mm1 \n\t" /* a */\
- "paddw %%mm6, %%mm4 \n\t" /* d */\
- "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
- "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
- "paddw %6, %%mm1 \n\t"\
- "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
- "psraw $5, %%mm3 \n\t"\
- "movq %5, %%mm1 \n\t"\
- "packuswb %%mm3, %%mm1 \n\t"\
- OP_MMX2(%%mm1, (%1),%%mm4, q)\
- /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
- \
- "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
- "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
- "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
- "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
- "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
- "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
- "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
- "paddw %%mm1, %%mm5 \n\t" /* b */\
- "paddw %%mm4, %%mm0 \n\t" /* c */\
- "paddw %%mm5, %%mm5 \n\t" /* 2b */\
- "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
- "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
- "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
- "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
- "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
- "paddw %%mm3, %%mm2 \n\t" /* d */\
- "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
- "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
- "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
- "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
- "paddw %%mm2, %%mm6 \n\t" /* a */\
- "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
- "paddw %6, %%mm0 \n\t"\
- "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
- "psraw $5, %%mm0 \n\t"\
- /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
- \
- "paddw %%mm5, %%mm3 \n\t" /* a */\
- "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
- "paddw %%mm4, %%mm6 \n\t" /* b */\
- "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
- "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
- "paddw %%mm1, %%mm4 \n\t" /* c */\
- "paddw %%mm2, %%mm5 \n\t" /* d */\
- "paddw %%mm6, %%mm6 \n\t" /* 2b */\
- "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
- "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
- "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
- "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
- "paddw %6, %%mm4 \n\t"\
- "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
- "psraw $5, %%mm4 \n\t"\
- "packuswb %%mm4, %%mm0 \n\t"\
- OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
- \
- "add %3, %0 \n\t"\
- "add %4, %1 \n\t"\
- "decl %2 \n\t"\
- " jnz 1b \n\t"\
- : "+a"(src), "+c"(dst), "+D"(h)\
- : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
- : "memory"\
- );\
-} \
- \
-static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, \
- uint8_t *src, \
- int dstStride, \
- int srcStride, \
- int h) \
-{ \
- int i; \
- int16_t temp[16]; \
- /* quick HACK, XXX FIXME MUST be optimized */ \
- for (i = 0; i < h; i++) { \
- temp[ 0] = (src[ 0] + src[ 1]) * 20 - (src[ 0] + src[ 2]) * 6 + \
- (src[ 1] + src[ 3]) * 3 - (src[ 2] + src[ 4]); \
- temp[ 1] = (src[ 1] + src[ 2]) * 20 - (src[ 0] + src[ 3]) * 6 + \
- (src[ 0] + src[ 4]) * 3 - (src[ 1] + src[ 5]); \
- temp[ 2] = (src[ 2] + src[ 3]) * 20 - (src[ 1] + src[ 4]) * 6 + \
- (src[ 0] + src[ 5]) * 3 - (src[ 0] + src[ 6]); \
- temp[ 3] = (src[ 3] + src[ 4]) * 20 - (src[ 2] + src[ 5]) * 6 + \
- (src[ 1] + src[ 6]) * 3 - (src[ 0] + src[ 7]); \
- temp[ 4] = (src[ 4] + src[ 5]) * 20 - (src[ 3] + src[ 6]) * 6 + \
- (src[ 2] + src[ 7]) * 3 - (src[ 1] + src[ 8]); \
- temp[ 5] = (src[ 5] + src[ 6]) * 20 - (src[ 4] + src[ 7]) * 6 + \
- (src[ 3] + src[ 8]) * 3 - (src[ 2] + src[ 9]); \
- temp[ 6] = (src[ 6] + src[ 7]) * 20 - (src[ 5] + src[ 8]) * 6 + \
- (src[ 4] + src[ 9]) * 3 - (src[ 3] + src[10]); \
- temp[ 7] = (src[ 7] + src[ 8]) * 20 - (src[ 6] + src[ 9]) * 6 + \
- (src[ 5] + src[10]) * 3 - (src[ 4] + src[11]); \
- temp[ 8] = (src[ 8] + src[ 9]) * 20 - (src[ 7] + src[10]) * 6 + \
- (src[ 6] + src[11]) * 3 - (src[ 5] + src[12]); \
- temp[ 9] = (src[ 9] + src[10]) * 20 - (src[ 8] + src[11]) * 6 + \
- (src[ 7] + src[12]) * 3 - (src[ 6] + src[13]); \
- temp[10] = (src[10] + src[11]) * 20 - (src[ 9] + src[12]) * 6 + \
- (src[ 8] + src[13]) * 3 - (src[ 7] + src[14]); \
- temp[11] = (src[11] + src[12]) * 20 - (src[10] + src[13]) * 6 + \
- (src[ 9] + src[14]) * 3 - (src[ 8] + src[15]); \
- temp[12] = (src[12] + src[13]) * 20 - (src[11] + src[14]) * 6 + \
- (src[10] + src[15]) * 3 - (src[ 9] + src[16]); \
- temp[13] = (src[13] + src[14]) * 20 - (src[12] + src[15]) * 6 + \
- (src[11] + src[16]) * 3 - (src[10] + src[16]); \
- temp[14] = (src[14] + src[15]) * 20 - (src[13] + src[16]) * 6 + \
- (src[12] + src[16]) * 3 - (src[11] + src[15]); \
- temp[15] = (src[15] + src[16]) * 20 - (src[14] + src[16]) * 6 + \
- (src[13] + src[15]) * 3 - (src[12] + src[14]); \
- __asm__ volatile(\
- "movq (%0), %%mm0 \n\t"\
- "movq 8(%0), %%mm1 \n\t"\
- "paddw %2, %%mm0 \n\t"\
- "paddw %2, %%mm1 \n\t"\
- "psraw $5, %%mm0 \n\t"\
- "psraw $5, %%mm1 \n\t"\
- "packuswb %%mm1, %%mm0 \n\t"\
- OP_3DNOW(%%mm0, (%1), %%mm1, q)\
- "movq 16(%0), %%mm0 \n\t"\
- "movq 24(%0), %%mm1 \n\t"\
- "paddw %2, %%mm0 \n\t"\
- "paddw %2, %%mm1 \n\t"\
- "psraw $5, %%mm0 \n\t"\
- "psraw $5, %%mm1 \n\t"\
- "packuswb %%mm1, %%mm0 \n\t"\
- OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
- :: "r"(temp), "r"(dst), "m"(ROUNDER)\
- : "memory"\
- );\
- dst += dstStride; \
- src += srcStride; \
- } \
-} \
- \
-static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, \
- uint8_t *src, \
- int dstStride, \
- int srcStride, \
- int h) \
-{ \
- __asm__ volatile(\
- "pxor %%mm7, %%mm7 \n\t"\
- "1: \n\t"\
- "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
- "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
- "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
- "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
- "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
- "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
- "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
- "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
- "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
- "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
- "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
- "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
- "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
- "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
- "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
- "paddw %%mm3, %%mm5 \n\t" /* b */\
- "paddw %%mm2, %%mm6 \n\t" /* c */\
- "paddw %%mm5, %%mm5 \n\t" /* 2b */\
- "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
- "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
- "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
- "paddw %%mm4, %%mm0 \n\t" /* a */\
- "paddw %%mm1, %%mm5 \n\t" /* d */\
- "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
- "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
- "paddw %5, %%mm6 \n\t"\
- "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
- "psraw $5, %%mm0 \n\t"\
- /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
- \
- "movd 5(%0), %%mm5 \n\t" /* FGHI */\
- "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
- "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
- "paddw %%mm5, %%mm1 \n\t" /* a */\
- "paddw %%mm6, %%mm2 \n\t" /* b */\
- "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
- "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
- "paddw %%mm6, %%mm3 \n\t" /* c */\
- "paddw %%mm5, %%mm4 \n\t" /* d */\
- "paddw %%mm2, %%mm2 \n\t" /* 2b */\
- "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
- "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
- "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
- "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
- "paddw %5, %%mm1 \n\t"\
- "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
- "psraw $5, %%mm3 \n\t"\
- "packuswb %%mm3, %%mm0 \n\t"\
- OP_MMX2(%%mm0, (%1), %%mm4, q)\
- \
- "add %3, %0 \n\t"\
- "add %4, %1 \n\t"\
- "decl %2 \n\t"\
- " jnz 1b \n\t"\
- : "+a"(src), "+c"(dst), "+d"(h)\
- : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
- : "memory"\
- );\
-} \
- \
-static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, \
- uint8_t *src, \
- int dstStride, \
- int srcStride, \
- int h) \
-{ \
- int i; \
- int16_t temp[8]; \
- /* quick HACK, XXX FIXME MUST be optimized */ \
- for (i = 0; i < h; i++) { \
- temp[0] = (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + \
- (src[1] + src[3]) * 3 - (src[2] + src[4]); \
- temp[1] = (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + \
- (src[0] + src[4]) * 3 - (src[1] + src[5]); \
- temp[2] = (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + \
- (src[0] + src[5]) * 3 - (src[0] + src[6]); \
- temp[3] = (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + \
- (src[1] + src[6]) * 3 - (src[0] + src[7]); \
- temp[4] = (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + \
- (src[2] + src[7]) * 3 - (src[1] + src[8]); \
- temp[5] = (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + \
- (src[3] + src[8]) * 3 - (src[2] + src[8]); \
- temp[6] = (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + \
- (src[4] + src[8]) * 3 - (src[3] + src[7]); \
- temp[7] = (src[7] + src[8]) * 20 - (src[6] + src[8]) * 6 + \
- (src[5] + src[7]) * 3 - (src[4] + src[6]); \
- __asm__ volatile(\
- "movq (%0), %%mm0 \n\t"\
- "movq 8(%0), %%mm1 \n\t"\
- "paddw %2, %%mm0 \n\t"\
- "paddw %2, %%mm1 \n\t"\
- "psraw $5, %%mm0 \n\t"\
- "psraw $5, %%mm1 \n\t"\
- "packuswb %%mm1, %%mm0 \n\t"\
- OP_3DNOW(%%mm0, (%1), %%mm1, q)\
- :: "r"(temp), "r"(dst), "m"(ROUNDER)\
- :"memory"\
- );\
- dst += dstStride; \
- src += srcStride; \
- } \
+#define QPEL_V_LOW(m3, m4, m5, m6, pw_20, pw_3, rnd, \
+ in0, in1, in2, in7, out, OP) \
+ "paddw "#m4", "#m3" \n\t" /* x1 */ \
+ "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */ \
+ "pmullw "#m3", %%mm4 \n\t" /* 20x1 */ \
+ "movq "#in7", "#m3" \n\t" /* d */ \
+ "movq "#in0", %%mm5 \n\t" /* D */ \
+ "paddw "#m3", %%mm5 \n\t" /* x4 */ \
+ "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */ \
+ "movq "#in1", %%mm5 \n\t" /* C */ \
+ "movq "#in2", %%mm6 \n\t" /* B */ \
+ "paddw "#m6", %%mm5 \n\t" /* x3 */ \
+ "paddw "#m5", %%mm6 \n\t" /* x2 */ \
+ "paddw %%mm6, %%mm6 \n\t" /* 2x2 */ \
+ "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */ \
+ "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */ \
+ "paddw "#rnd", %%mm4 \n\t" /* x2 */ \
+ "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */ \
+ "psraw $5, %%mm5 \n\t" \
+ "packuswb %%mm5, %%mm5 \n\t" \
+ OP(%%mm5, out, %%mm7, d)
+
+#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW) \
+static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, \
+ uint8_t *src, \
+ int dstStride, \
+ int srcStride, \
+ int h) \
+{ \
+ uint64_t temp; \
+ \
+ __asm__ volatile ( \
+ "pxor %%mm7, %%mm7 \n\t" \
+ "1: \n\t" \
+ "movq (%0), %%mm0 \n\t" /* ABCDEFGH */ \
+ "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */ \
+ "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */ \
+ "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */ \
+ "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */ \
+ "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */ \
+ "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */ \
+ "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */ \
+ "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */ \
+ "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */ \
+ "psllq $16, %%mm3 \n\t" /* 00ABCDEF */ \
+ "psllq $24, %%mm4 \n\t" /* 000ABCDE */ \
+ "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */ \
+ "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */ \
+ "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */ \
+ "paddw %%mm3, %%mm5 \n\t" /* b */ \
+ "paddw %%mm2, %%mm6 \n\t" /* c */ \
+ "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
+ "psubw %%mm5, %%mm6 \n\t" /* c - 2b */ \
+ "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */ \
+ "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */ \
+ "paddw %%mm4, %%mm0 \n\t" /* a */ \
+ "paddw %%mm1, %%mm5 \n\t" /* d */ \
+ "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */ \
+ "psubw %%mm5, %%mm0 \n\t" /* 20a - d */ \
+ "paddw %6, %%mm6 \n\t" \
+ "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
+ "psraw $5, %%mm0 \n\t" \
+ "movq %%mm0, %5 \n\t" \
+ /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */ \
+ \
+ "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */ \
+ "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */ \
+ "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */ \
+ "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */ \
+ "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */ \
+ "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */ \
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */ \
+ "paddw %%mm0, %%mm2 \n\t" /* b */ \
+ "paddw %%mm5, %%mm3 \n\t" /* c */ \
+ "paddw %%mm2, %%mm2 \n\t" /* 2b */ \
+ "psubw %%mm2, %%mm3 \n\t" /* c - 2b */ \
+ "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */ \
+ "psrlq $24, %%mm6 \n\t" /* IJKLM000 */ \
+ "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */ \
+ "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */ \
+ "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */ \
+ "paddw %%mm2, %%mm1 \n\t" /* a */ \
+ "paddw %%mm6, %%mm4 \n\t" /* d */ \
+ "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */ \
+ "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */ \
+ "paddw %6, %%mm1 \n\t" \
+ "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */ \
+ "psraw $5, %%mm3 \n\t" \
+ "movq %5, %%mm1 \n\t" \
+ "packuswb %%mm3, %%mm1 \n\t" \
+ OP_MMX2(%%mm1, (%1), %%mm4, q) \
+ /* mm0 = GHIJ, mm2 = FGHI, mm5 = HIJK, mm6 = IJKL, mm7 = 0 */ \
+ \
+ "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */ \
+ "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */ \
+ "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */ \
+ "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */ \
+ "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */ \
+ "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */ \
+ "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */ \
+ "paddw %%mm1, %%mm5 \n\t" /* b */ \
+ "paddw %%mm4, %%mm0 \n\t" /* c */ \
+ "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
+ "psubw %%mm5, %%mm0 \n\t" /* c - 2b */ \
+ "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */ \
+ "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */ \
+ "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */ \
+ "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */ \
+ "paddw %%mm3, %%mm2 \n\t" /* d */ \
+ "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */ \
+ "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */ \
+ "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */ \
+ "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */ \
+ "paddw %%mm2, %%mm6 \n\t" /* a */ \
+ "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */ \
+ "paddw %6, %%mm0 \n\t" \
+ "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
+ "psraw $5, %%mm0 \n\t" \
+ /* mm1 = KLMN, mm2 = JKLM, mm3 = MNOP, */ \
+ /* mm4 = LMNO, mm5 = NOPQ mm7 = 0 */ \
+ \
+ "paddw %%mm5, %%mm3 \n\t" /* a */ \
+ "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */ \
+ "paddw %%mm4, %%mm6 \n\t" /* b */ \
+ "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */ \
+ "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */ \
+ "paddw %%mm1, %%mm4 \n\t" /* c */ \
+ "paddw %%mm2, %%mm5 \n\t" /* d */ \
+ "paddw %%mm6, %%mm6 \n\t" /* 2b */ \
+ "psubw %%mm6, %%mm4 \n\t" /* c - 2b */ \
+ "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */ \
+ "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */ \
+ "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */ \
+ "paddw %6, %%mm4 \n\t" \
+ "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */ \
+ "psraw $5, %%mm4 \n\t" \
+ "packuswb %%mm4, %%mm0 \n\t" \
+ OP_MMX2(%%mm0, 8(%1), %%mm4, q) \
+ \
+ "add %3, %0 \n\t" \
+ "add %4, %1 \n\t" \
+ "decl %2 \n\t" \
+ "jnz 1b \n\t" \
+ : "+a"(src), "+c"(dst), "+D"(h) \
+ : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), \
+ /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(temp), "m"(ROUNDER) \
+ : "memory" \
+ ); \
+} \
+ \
+static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, \
+ uint8_t *src, \
+ int dstStride, \
+ int srcStride, \
+ int h) \
+{ \
+ int i; \
+ int16_t temp[16]; \
+ /* quick HACK, XXX FIXME MUST be optimized */ \
+ for (i = 0; i < h; i++) { \
+ temp[ 0] = (src[ 0] + src[ 1]) * 20 - (src[ 0] + src[ 2]) * 6 + \
+ (src[ 1] + src[ 3]) * 3 - (src[ 2] + src[ 4]); \
+ temp[ 1] = (src[ 1] + src[ 2]) * 20 - (src[ 0] + src[ 3]) * 6 + \
+ (src[ 0] + src[ 4]) * 3 - (src[ 1] + src[ 5]); \
+ temp[ 2] = (src[ 2] + src[ 3]) * 20 - (src[ 1] + src[ 4]) * 6 + \
+ (src[ 0] + src[ 5]) * 3 - (src[ 0] + src[ 6]); \
+ temp[ 3] = (src[ 3] + src[ 4]) * 20 - (src[ 2] + src[ 5]) * 6 + \
+ (src[ 1] + src[ 6]) * 3 - (src[ 0] + src[ 7]); \
+ temp[ 4] = (src[ 4] + src[ 5]) * 20 - (src[ 3] + src[ 6]) * 6 + \
+ (src[ 2] + src[ 7]) * 3 - (src[ 1] + src[ 8]); \
+ temp[ 5] = (src[ 5] + src[ 6]) * 20 - (src[ 4] + src[ 7]) * 6 + \
+ (src[ 3] + src[ 8]) * 3 - (src[ 2] + src[ 9]); \
+ temp[ 6] = (src[ 6] + src[ 7]) * 20 - (src[ 5] + src[ 8]) * 6 + \
+ (src[ 4] + src[ 9]) * 3 - (src[ 3] + src[10]); \
+ temp[ 7] = (src[ 7] + src[ 8]) * 20 - (src[ 6] + src[ 9]) * 6 + \
+ (src[ 5] + src[10]) * 3 - (src[ 4] + src[11]); \
+ temp[ 8] = (src[ 8] + src[ 9]) * 20 - (src[ 7] + src[10]) * 6 + \
+ (src[ 6] + src[11]) * 3 - (src[ 5] + src[12]); \
+ temp[ 9] = (src[ 9] + src[10]) * 20 - (src[ 8] + src[11]) * 6 + \
+ (src[ 7] + src[12]) * 3 - (src[ 6] + src[13]); \
+ temp[10] = (src[10] + src[11]) * 20 - (src[ 9] + src[12]) * 6 + \
+ (src[ 8] + src[13]) * 3 - (src[ 7] + src[14]); \
+ temp[11] = (src[11] + src[12]) * 20 - (src[10] + src[13]) * 6 + \
+ (src[ 9] + src[14]) * 3 - (src[ 8] + src[15]); \
+ temp[12] = (src[12] + src[13]) * 20 - (src[11] + src[14]) * 6 + \
+ (src[10] + src[15]) * 3 - (src[ 9] + src[16]); \
+ temp[13] = (src[13] + src[14]) * 20 - (src[12] + src[15]) * 6 + \
+ (src[11] + src[16]) * 3 - (src[10] + src[16]); \
+ temp[14] = (src[14] + src[15]) * 20 - (src[13] + src[16]) * 6 + \
+ (src[12] + src[16]) * 3 - (src[11] + src[15]); \
+ temp[15] = (src[15] + src[16]) * 20 - (src[14] + src[16]) * 6 + \
+ (src[13] + src[15]) * 3 - (src[12] + src[14]); \
+ __asm__ volatile ( \
+ "movq (%0), %%mm0 \n\t" \
+ "movq 8(%0), %%mm1 \n\t" \
+ "paddw %2, %%mm0 \n\t" \
+ "paddw %2, %%mm1 \n\t" \
+ "psraw $5, %%mm0 \n\t" \
+ "psraw $5, %%mm1 \n\t" \
+ "packuswb %%mm1, %%mm0 \n\t" \
+ OP_3DNOW(%%mm0, (%1), %%mm1, q) \
+ "movq 16(%0), %%mm0 \n\t" \
+ "movq 24(%0), %%mm1 \n\t" \
+ "paddw %2, %%mm0 \n\t" \
+ "paddw %2, %%mm1 \n\t" \
+ "psraw $5, %%mm0 \n\t" \
+ "psraw $5, %%mm1 \n\t" \
+ "packuswb %%mm1, %%mm0 \n\t" \
+ OP_3DNOW(%%mm0, 8(%1), %%mm1, q) \
+ :: "r"(temp), "r"(dst), "m"(ROUNDER) \
+ : "memory" \
+ ); \
+ dst += dstStride; \
+ src += srcStride; \
+ } \
+} \
+ \
+static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, \
+ uint8_t *src, \
+ int dstStride, \
+ int srcStride, \
+ int h) \
+{ \
+ __asm__ volatile ( \
+ "pxor %%mm7, %%mm7 \n\t" \
+ "1: \n\t" \
+ "movq (%0), %%mm0 \n\t" /* ABCDEFGH */ \
+ "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */ \
+ "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */ \
+ "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */ \
+ "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */ \
+ "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */ \
+ "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */ \
+ "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */ \
+ "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */ \
+ "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */ \
+ "psllq $16, %%mm3 \n\t" /* 00ABCDEF */ \
+ "psllq $24, %%mm4 \n\t" /* 000ABCDE */ \
+ "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */ \
+ "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */ \
+ "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */ \
+ "paddw %%mm3, %%mm5 \n\t" /* b */ \
+ "paddw %%mm2, %%mm6 \n\t" /* c */ \
+ "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
+ "psubw %%mm5, %%mm6 \n\t" /* c - 2b */ \
+ "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */ \
+ "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */ \
+ "paddw %%mm4, %%mm0 \n\t" /* a */ \
+ "paddw %%mm1, %%mm5 \n\t" /* d */ \
+ "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */ \
+ "psubw %%mm5, %%mm0 \n\t" /* 20a - d */ \
+ "paddw %5, %%mm6 \n\t" \
+ "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
+ "psraw $5, %%mm0 \n\t" \
+ /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */ \
+ \
+ "movd 5(%0), %%mm5 \n\t" /* FGHI */ \
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */ \
+ "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */ \
+ "paddw %%mm5, %%mm1 \n\t" /* a */ \
+ "paddw %%mm6, %%mm2 \n\t" /* b */ \
+ "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */ \
+ "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */ \
+ "paddw %%mm6, %%mm3 \n\t" /* c */ \
+ "paddw %%mm5, %%mm4 \n\t" /* d */ \
+ "paddw %%mm2, %%mm2 \n\t" /* 2b */ \
+ "psubw %%mm2, %%mm3 \n\t" /* c - 2b */ \
+ "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */ \
+ "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */ \
+ "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */ \
+ "paddw %5, %%mm1 \n\t" \
+ "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */ \
+ "psraw $5, %%mm3 \n\t" \
+ "packuswb %%mm3, %%mm0 \n\t" \
+ OP_MMX2(%%mm0, (%1), %%mm4, q) \
+ \
+ "add %3, %0 \n\t" \
+ "add %4, %1 \n\t" \
+ "decl %2 \n\t" \
+ "jnz 1b \n\t" \
+ : "+a"(src), "+c"(dst), "+d"(h) \
+ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), \
+ /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER) \
+ : "memory" \
+ ); \
+} \
+ \
+static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, \
+ uint8_t *src, \
+ int dstStride, \
+ int srcStride, \
+ int h) \
+{ \
+ int i; \
+ int16_t temp[8]; \
+ /* quick HACK, XXX FIXME MUST be optimized */ \
+ for (i = 0; i < h; i++) { \
+ temp[0] = (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + \
+ (src[1] + src[3]) * 3 - (src[2] + src[4]); \
+ temp[1] = (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + \
+ (src[0] + src[4]) * 3 - (src[1] + src[5]); \
+ temp[2] = (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + \
+ (src[0] + src[5]) * 3 - (src[0] + src[6]); \
+ temp[3] = (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + \
+ (src[1] + src[6]) * 3 - (src[0] + src[7]); \
+ temp[4] = (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + \
+ (src[2] + src[7]) * 3 - (src[1] + src[8]); \
+ temp[5] = (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + \
+ (src[3] + src[8]) * 3 - (src[2] + src[8]); \
+ temp[6] = (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + \
+ (src[4] + src[8]) * 3 - (src[3] + src[7]); \
+ temp[7] = (src[7] + src[8]) * 20 - (src[6] + src[8]) * 6 + \
+ (src[5] + src[7]) * 3 - (src[4] + src[6]); \
+ __asm__ volatile ( \
+ "movq (%0), %%mm0 \n\t" \
+ "movq 8(%0), %%mm1 \n\t" \
+ "paddw %2, %%mm0 \n\t" \
+ "paddw %2, %%mm1 \n\t" \
+ "psraw $5, %%mm0 \n\t" \
+ "psraw $5, %%mm1 \n\t" \
+ "packuswb %%mm1, %%mm0 \n\t" \
+ OP_3DNOW(%%mm0, (%1), %%mm1, q) \
+ :: "r"(temp), "r"(dst), "m"(ROUNDER) \
+ : "memory" \
+ ); \
+ dst += dstStride; \
+ src += srcStride; \
+ } \
}
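
For reference, each temp[] row above and the QPEL_V_LOW columns in the v-lowpass functions that follow evaluate the same symmetric 8-tap filter (-1, 3, -6, 20, 20, -6, 3, -1), bias the sum with ROUNDER (ff_pw_16, or ff_pw_15 in the no-rnd variants) and shift down by 5; packuswb supplies the unsigned 8-bit saturation. A minimal scalar sketch (hypothetical helper name; the edge clamping done by the temp[0..2] and temp[13..15] rows is omitted):

    #include <stdint.h>

    static inline uint8_t qpel_tap(const uint8_t *src, int rounder)
    {
        int t = (src[0] + src[1]) * 20 - (src[-1] + src[2]) * 6 +
                (src[-2] + src[3]) * 3 - (src[-3] + src[4]);
        t = (t + rounder) >> 5;
        return t < 0 ? 0 : t > 255 ? 255 : t; /* packuswb saturation */
    }
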
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX) \
@@ -1205,77 +1212,79 @@ static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, \
uint64_t *temp_ptr = temp; \
int count = 17; \
\
- /*FIXME unroll */\
- __asm__ volatile(\
- "pxor %%mm7, %%mm7 \n\t"\
- "1: \n\t"\
- "movq (%0), %%mm0 \n\t"\
- "movq (%0), %%mm1 \n\t"\
- "movq 8(%0), %%mm2 \n\t"\
- "movq 8(%0), %%mm3 \n\t"\
- "punpcklbw %%mm7, %%mm0 \n\t"\
- "punpckhbw %%mm7, %%mm1 \n\t"\
- "punpcklbw %%mm7, %%mm2 \n\t"\
- "punpckhbw %%mm7, %%mm3 \n\t"\
- "movq %%mm0, (%1) \n\t"\
- "movq %%mm1, 17*8(%1) \n\t"\
- "movq %%mm2, 2*17*8(%1) \n\t"\
- "movq %%mm3, 3*17*8(%1) \n\t"\
- "add $8, %1 \n\t"\
- "add %3, %0 \n\t"\
- "decl %2 \n\t"\
- " jnz 1b \n\t"\
- : "+r" (src), "+r" (temp_ptr), "+r"(count)\
- : "r" ((x86_reg)srcStride)\
- : "memory"\
- );\
+ /* FIXME unroll */ \
+ __asm__ volatile ( \
+ "pxor %%mm7, %%mm7 \n\t" \
+ "1: \n\t" \
+ "movq (%0), %%mm0 \n\t" \
+ "movq (%0), %%mm1 \n\t" \
+ "movq 8(%0), %%mm2 \n\t" \
+ "movq 8(%0), %%mm3 \n\t" \
+ "punpcklbw %%mm7, %%mm0 \n\t" \
+ "punpckhbw %%mm7, %%mm1 \n\t" \
+ "punpcklbw %%mm7, %%mm2 \n\t" \
+ "punpckhbw %%mm7, %%mm3 \n\t" \
+ "movq %%mm0, (%1) \n\t" \
+ "movq %%mm1, 17 * 8(%1) \n\t" \
+ "movq %%mm2, 2 * 17 * 8(%1) \n\t" \
+ "movq %%mm3, 3 * 17 * 8(%1) \n\t" \
+ "add $8, %1 \n\t" \
+ "add %3, %0 \n\t" \
+ "decl %2 \n\t" \
+ "jnz 1b \n\t" \
+ : "+r"(src), "+r"(temp_ptr), "+r"(count) \
+ : "r"((x86_reg)srcStride) \
+ : "memory" \
+ ); \
\
temp_ptr = temp; \
count = 4; \
\
-/*FIXME reorder for speed */\
- __asm__ volatile(\
- /*"pxor %%mm7, %%mm7 \n\t"*/\
- "1: \n\t"\
- "movq (%0), %%mm0 \n\t"\
- "movq 8(%0), %%mm1 \n\t"\
- "movq 16(%0), %%mm2 \n\t"\
- "movq 24(%0), %%mm3 \n\t"\
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
- "add %4, %1 \n\t"\
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
- \
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
- "add %4, %1 \n\t"\
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
- "add %4, %1 \n\t"\
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
- "add %4, %1 \n\t"\
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
- "add %4, %1 \n\t"\
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
- "add %4, %1 \n\t"\
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
- \
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
- "add %4, %1 \n\t" \
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
- \
- "add $136, %0 \n\t"\
- "add %6, %1 \n\t"\
- "decl %2 \n\t"\
- " jnz 1b \n\t"\
- \
- : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
- : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
- :"memory"\
- );\
+ /* FIXME reorder for speed */ \
+ __asm__ volatile ( \
+ /* "pxor %%mm7, %%mm7 \n\t" */ \
+ "1: \n\t" \
+ "movq (%0), %%mm0 \n\t" \
+ "movq 8(%0), %%mm1 \n\t" \
+ "movq 16(%0), %%mm2 \n\t" \
+ "movq 24(%0), %%mm3 \n\t" \
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
+ "add %4, %1 \n\t" \
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
+ \
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
+ "add %4, %1 \n\t" \
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP) \
+ "add %4, %1 \n\t" \
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP) \
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP) \
+ "add %4, %1 \n\t" \
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP) \
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0), 104(%0), (%1, %3), OP) \
+ "add %4, %1 \n\t" \
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0), 112(%0), (%1), OP) \
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0), 120(%0), (%1, %3), OP) \
+ "add %4, %1 \n\t" \
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0), 128(%0), (%1), OP) \
+ \
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0), 128(%0), (%1, %3), OP) \
+ "add %4, %1 \n\t" \
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0), 104(%0), 120(%0), (%1), OP) \
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0), 104(%0), 112(%0), 112(%0), (%1, %3), OP) \
+ \
+ "add $136, %0 \n\t" \
+ "add %6, %1 \n\t" \
+ "decl %2 \n\t" \
+ "jnz 1b \n\t" \
+ \
+ : "+r"(temp_ptr), "+r"(dst), "+g"(count) \
+ : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride), \
+ /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER), \
+ "g"(4 - 14 * (x86_reg)dstStride) \
+ : "memory" \
+ ); \
} \
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, \
@@ -1287,59 +1296,61 @@ static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, \
uint64_t *temp_ptr = temp; \
int count = 9; \
\
- /*FIXME unroll */\
- __asm__ volatile(\
- "pxor %%mm7, %%mm7 \n\t"\
- "1: \n\t"\
- "movq (%0), %%mm0 \n\t"\
- "movq (%0), %%mm1 \n\t"\
- "punpcklbw %%mm7, %%mm0 \n\t"\
- "punpckhbw %%mm7, %%mm1 \n\t"\
- "movq %%mm0, (%1) \n\t"\
- "movq %%mm1, 9*8(%1) \n\t"\
- "add $8, %1 \n\t"\
- "add %3, %0 \n\t"\
- "decl %2 \n\t"\
- " jnz 1b \n\t"\
- : "+r" (src), "+r" (temp_ptr), "+r"(count)\
- : "r" ((x86_reg)srcStride)\
- : "memory"\
- );\
+ /* FIXME unroll */ \
+ __asm__ volatile ( \
+ "pxor %%mm7, %%mm7 \n\t" \
+ "1: \n\t" \
+ "movq (%0), %%mm0 \n\t" \
+ "movq (%0), %%mm1 \n\t" \
+ "punpcklbw %%mm7, %%mm0 \n\t" \
+ "punpckhbw %%mm7, %%mm1 \n\t" \
+ "movq %%mm0, (%1) \n\t" \
+ "movq %%mm1, 9*8(%1) \n\t" \
+ "add $8, %1 \n\t" \
+ "add %3, %0 \n\t" \
+ "decl %2 \n\t" \
+ "jnz 1b \n\t" \
+ : "+r"(src), "+r"(temp_ptr), "+r"(count) \
+ : "r"((x86_reg)srcStride) \
+ : "memory" \
+ ); \
\
temp_ptr = temp; \
count = 2; \
\
-/*FIXME reorder for speed */\
- __asm__ volatile(\
- /*"pxor %%mm7, %%mm7 \n\t"*/\
- "1: \n\t"\
- "movq (%0), %%mm0 \n\t"\
- "movq 8(%0), %%mm1 \n\t"\
- "movq 16(%0), %%mm2 \n\t"\
- "movq 24(%0), %%mm3 \n\t"\
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
- "add %4, %1 \n\t"\
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
- \
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
- "add %4, %1 \n\t"\
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
- \
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
- "add %4, %1 \n\t"\
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
- \
- "add $72, %0 \n\t"\
- "add %6, %1 \n\t"\
- "decl %2 \n\t"\
- " jnz 1b \n\t"\
- \
- : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
- : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
- : "memory"\
- );\
+ /* FIXME reorder for speed */ \
+ __asm__ volatile ( \
+ /* "pxor %%mm7, %%mm7 \n\t" */ \
+ "1: \n\t" \
+ "movq (%0), %%mm0 \n\t" \
+ "movq 8(%0), %%mm1 \n\t" \
+ "movq 16(%0), %%mm2 \n\t" \
+ "movq 24(%0), %%mm3 \n\t" \
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
+ "add %4, %1 \n\t" \
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
+ \
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
+ "add %4, %1 \n\t" \
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
+ \
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP) \
+ "add %4, %1 \n\t" \
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP) \
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP) \
+ \
+ "add $72, %0 \n\t" \
+ "add %6, %1 \n\t" \
+ "decl %2 \n\t" \
+ "jnz 1b \n\t" \
+ \
+ : "+r"(temp_ptr), "+r"(dst), "+g"(count) \
+ : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride), \
+ /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER), \
+ "g"(4 - 6 * (x86_reg)dstStride) \
+ : "memory" \
+ ); \
} \
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
@@ -1696,25 +1707,28 @@ static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
}
-#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
-#define AVG_3DNOW_OP(a,b,temp, size) \
-"mov" #size " " #b ", " #temp " \n\t"\
-"pavgusb " #temp ", " #a " \n\t"\
-"mov" #size " " #a ", " #b " \n\t"
-#define AVG_MMX2_OP(a,b,temp, size) \
-"mov" #size " " #b ", " #temp " \n\t"\
-"pavgb " #temp ", " #a " \n\t"\
-"mov" #size " " #a ", " #b " \n\t"
-
-QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
-QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
-QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
-QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
-QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
-QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
-QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
-QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
-QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
+#define PUT_OP(a, b, temp, size) \
+ "mov"#size" "#a", "#b" \n\t"
+
+#define AVG_3DNOW_OP(a, b, temp, size) \
+ "mov"#size" "#b", "#temp" \n\t" \
+ "pavgusb "#temp", "#a" \n\t" \
+ "mov"#size" "#a", "#b" \n\t"
+
+#define AVG_MMX2_OP(a, b, temp, size) \
+ "mov"#size" "#b", "#temp" \n\t" \
+ "pavgb "#temp", "#a" \n\t" \
+ "mov"#size" "#a", "#b" \n\t"
+
+QPEL_BASE(put_, ff_pw_16, _, PUT_OP, PUT_OP)
+QPEL_BASE(avg_, ff_pw_16, _, AVG_MMX2_OP, AVG_3DNOW_OP)
+QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
+QPEL_OP(put_, ff_pw_16, _, PUT_OP, 3dnow)
+QPEL_OP(avg_, ff_pw_16, _, AVG_3DNOW_OP, 3dnow)
+QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
+QPEL_OP(put_, ff_pw_16, _, PUT_OP, mmx2)
+QPEL_OP(avg_, ff_pw_16, _, AVG_MMX2_OP, mmx2)
+QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
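
The OP hooks plugged in above differ only in how the filtered bytes reach dst: PUT_OP stores them directly, while the AVG variants first take a rounded average with the bytes already in dst (pavgb on MMX2, pavgusb on 3DNow!). Per byte, that average is (a sketch):

    #include <stdint.h>

    /* Rounded byte average, as computed by pavgb/pavgusb. */
    static inline uint8_t avg_byte(uint8_t a, uint8_t b)
    {
        return (a + b + 1) >> 1;
    }
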
/***********************************/
/* bilinear qpel: not compliant to any spec, only for -lavdopts fast */
@@ -1881,7 +1895,7 @@ static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
(oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
// uses more than 16 bits of subpel mv (only at huge resolution)
|| (dxx | dxy | dyx | dyy) & 15) {
- //FIXME could still use mmx for some of the rows
+ // FIXME could still use mmx for some of the rows
ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
shift, r, width, height);
return;
@@ -1894,11 +1908,11 @@ static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
src = edge_buf;
}
- __asm__ volatile(
- "movd %0, %%mm6 \n\t"
- "pxor %%mm7, %%mm7 \n\t"
- "punpcklwd %%mm6, %%mm6 \n\t"
- "punpcklwd %%mm6, %%mm6 \n\t"
+ __asm__ volatile (
+ "movd %0, %%mm6 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "punpcklwd %%mm6, %%mm6 \n\t"
+ "punpcklwd %%mm6, %%mm6 \n\t"
:: "r"(1<<shift)
);
@@ -1913,56 +1927,56 @@ static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
oys - dyys + dyxs * (x + 3) };
for (y = 0; y < h; y++) {
- __asm__ volatile(
- "movq %0, %%mm4 \n\t"
- "movq %1, %%mm5 \n\t"
- "paddw %2, %%mm4 \n\t"
- "paddw %3, %%mm5 \n\t"
- "movq %%mm4, %0 \n\t"
- "movq %%mm5, %1 \n\t"
- "psrlw $12, %%mm4 \n\t"
- "psrlw $12, %%mm5 \n\t"
+ __asm__ volatile (
+ "movq %0, %%mm4 \n\t"
+ "movq %1, %%mm5 \n\t"
+ "paddw %2, %%mm4 \n\t"
+ "paddw %3, %%mm5 \n\t"
+ "movq %%mm4, %0 \n\t"
+ "movq %%mm5, %1 \n\t"
+ "psrlw $12, %%mm4 \n\t"
+ "psrlw $12, %%mm5 \n\t"
: "+m"(*dx4), "+m"(*dy4)
: "m"(*dxy4), "m"(*dyy4)
);
- __asm__ volatile(
- "movq %%mm6, %%mm2 \n\t"
- "movq %%mm6, %%mm1 \n\t"
- "psubw %%mm4, %%mm2 \n\t"
- "psubw %%mm5, %%mm1 \n\t"
- "movq %%mm2, %%mm0 \n\t"
- "movq %%mm4, %%mm3 \n\t"
- "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
- "pmullw %%mm5, %%mm3 \n\t" // dx*dy
- "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
- "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
-
- "movd %4, %%mm5 \n\t"
- "movd %3, %%mm4 \n\t"
+ __asm__ volatile (
+ "movq %%mm6, %%mm2 \n\t"
+ "movq %%mm6, %%mm1 \n\t"
+ "psubw %%mm4, %%mm2 \n\t"
+ "psubw %%mm5, %%mm1 \n\t"
+ "movq %%mm2, %%mm0 \n\t"
+ "movq %%mm4, %%mm3 \n\t"
+ "pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
+ "pmullw %%mm5, %%mm3 \n\t" // dx * dy
+ "pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
+ "pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)
+
+ "movd %4, %%mm5 \n\t"
+ "movd %3, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
- "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
- "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
+ "pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
+ "pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
- "movd %2, %%mm5 \n\t"
- "movd %1, %%mm4 \n\t"
+ "movd %2, %%mm5 \n\t"
+ "movd %1, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
- "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
- "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
- "paddw %5, %%mm1 \n\t"
- "paddw %%mm3, %%mm2 \n\t"
- "paddw %%mm1, %%mm0 \n\t"
- "paddw %%mm2, %%mm0 \n\t"
-
- "psrlw %6, %%mm0 \n\t"
- "packuswb %%mm0, %%mm0 \n\t"
- "movd %%mm0, %0 \n\t"
-
- : "=m"(dst[x+y*stride])
+ "pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
+ "pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
+ "paddw %5, %%mm1 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+
+ "psrlw %6, %%mm0 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "movd %%mm0, %0 \n\t"
+
+ : "=m"(dst[x + y * stride])
: "m"(src[0]), "m"(src[1]),
- "m"(src[stride]), "m"(src[stride+1]),
+ "m"(src[stride]), "m"(src[stride + 1]),
"m"(*r4), "m"(shift2)
);
src += stride;
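
The four pmullw products in the loop above form one bilinear tap per pixel, exactly as the operand comments spell out. A scalar sketch with hypothetical names (s = 1 << shift; r and shift2 as in the asm constraints):

    #include <stdint.h>

    static inline uint8_t gmc_pixel(const uint8_t *src, int stride,
                                    int dx, int dy, int s, int r, int shift2)
    {
        int v = src[0]          * (s - dx) * (s - dy) +
                src[1]          * dx       * (s - dy) +
                src[stride]     * (s - dx) * dy       +
                src[stride + 1] * dx       * dy;
        return (v + r) >> shift2; /* packuswb saturates the real store */
    }
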
@@ -2006,7 +2020,7 @@ static void name(void *mem, int stride, int h) \
{ \
const uint8_t *p = mem; \
do { \
- __asm__ volatile(#op" %0" :: "m"(*p)); \
+ __asm__ volatile (#op" %0" :: "m"(*p)); \
p += stride; \
} while (--h); \
}
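
This macro stamps out one row-walking function per memory instruction, used in this file for the prefetch variants. A portable sketch of the same access pattern, using the GCC builtin rather than raw asm:

    #include <stdint.h>

    static void prefetch_rows(void *mem, int stride, int h)
    {
        const uint8_t *p = mem;
        do {
            __builtin_prefetch(p); /* the asm form issues the insn directly */
            p += stride;
        } while (--h);
    }
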
@@ -2153,43 +2167,43 @@ static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
int i;
- __asm__ volatile("pxor %%mm7, %%mm7":);
+ __asm__ volatile ("pxor %%mm7, %%mm7":);
for (i = 0; i < blocksize; i += 2) {
- __asm__ volatile(
- "movq %0, %%mm0 \n\t"
- "movq %1, %%mm1 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "movq %%mm1, %%mm3 \n\t"
- "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
- "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
- "pslld $31, %%mm2 \n\t" // keep only the sign bit
- "pxor %%mm2, %%mm1 \n\t"
- "movq %%mm3, %%mm4 \n\t"
- "pand %%mm1, %%mm3 \n\t"
- "pandn %%mm1, %%mm4 \n\t"
- "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
- "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
- "movq %%mm3, %1 \n\t"
- "movq %%mm0, %0 \n\t"
- :"+m"(mag[i]), "+m"(ang[i])
- ::"memory"
+ __asm__ volatile (
+ "movq %0, %%mm0 \n\t"
+ "movq %1, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
+ "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
+ "pslld $31, %%mm2 \n\t" // keep only the sign bit
+ "pxor %%mm2, %%mm1 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pand %%mm1, %%mm3 \n\t"
+ "pandn %%mm1, %%mm4 \n\t"
+ "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a < 0) & (a ^ sign(m)))
+ "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a > 0) & (a ^ sign(m)))
+ "movq %%mm3, %1 \n\t"
+ "movq %%mm0, %0 \n\t"
+ : "+m"(mag[i]), "+m"(ang[i])
+ :: "memory"
);
}
- __asm__ volatile("femms");
+ __asm__ volatile ("femms");
}
static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
int i;
- __asm__ volatile(
- "movaps %0, %%xmm5 \n\t"
- ::"m"(ff_pdw_80000000[0])
+ __asm__ volatile (
+ "movaps %0, %%xmm5 \n\t"
+ :: "m"(ff_pdw_80000000[0])
);
for (i = 0; i < blocksize; i += 4) {
- __asm__ volatile(
- "movaps %0, %%xmm0 \n\t"
- "movaps %1, %%xmm1 \n\t"
+ __asm__ volatile (
+ "movaps %0, %%xmm0 \n\t"
+ "movaps %1, %%xmm1 \n\t"
"xorps %%xmm2, %%xmm2 \n\t"
"xorps %%xmm3, %%xmm3 \n\t"
"cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
@@ -2199,12 +2213,12 @@ static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
"movaps %%xmm3, %%xmm4 \n\t"
"andps %%xmm1, %%xmm3 \n\t"
"andnps %%xmm1, %%xmm4 \n\t"
- "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
- "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
+ "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a < 0) & (a ^ sign(m)))
+ "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a > 0) & (a ^ sign(m)))
"movaps %%xmm3, %1 \n\t"
"movaps %%xmm0, %0 \n\t"
- :"+m"(mag[i]), "+m"(ang[i])
- ::"memory"
+ : "+m"(mag[i]), "+m"(ang[i])
+ :: "memory"
);
}
}
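
Both versions vectorize the Vorbis magnitude/angle inverse coupling branch-free, substituting pfcmpge/cmpleps sign masks for the branches. The scalar logic being replaced is roughly (per the C fallback in libavcodec):

    static void vorbis_inverse_coupling_scalar(float *mag, float *ang, int n)
    {
        int i;
        for (i = 0; i < n; i++) {
            if (mag[i] > 0.0f) {
                if (ang[i] > 0.0f) {
                    ang[i] = mag[i] - ang[i];
                } else {
                    float t = ang[i];
                    ang[i]  = mag[i];
                    mag[i] += t;
                }
            } else {
                if (ang[i] > 0.0f) {
                    ang[i] += mag[i];
                } else {
                    float t = ang[i];
                    ang[i]  = mag[i];
                    mag[i] -= t;
                }
            }
        }
    }
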
@@ -2212,67 +2226,68 @@ static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
#define IF1(x) x
#define IF0(x)
-#define MIX5(mono,stereo)\
- __asm__ volatile(\
- "movss 0(%2), %%xmm5 \n"\
- "movss 8(%2), %%xmm6 \n"\
- "movss 24(%2), %%xmm7 \n"\
- "shufps $0, %%xmm5, %%xmm5 \n"\
- "shufps $0, %%xmm6, %%xmm6 \n"\
- "shufps $0, %%xmm7, %%xmm7 \n"\
- "1: \n"\
- "movaps (%0,%1), %%xmm0 \n"\
- "movaps 0x400(%0,%1), %%xmm1 \n"\
- "movaps 0x800(%0,%1), %%xmm2 \n"\
- "movaps 0xc00(%0,%1), %%xmm3 \n"\
- "movaps 0x1000(%0,%1), %%xmm4 \n"\
- "mulps %%xmm5, %%xmm0 \n"\
- "mulps %%xmm6, %%xmm1 \n"\
- "mulps %%xmm5, %%xmm2 \n"\
- "mulps %%xmm7, %%xmm3 \n"\
- "mulps %%xmm7, %%xmm4 \n"\
- stereo("addps %%xmm1, %%xmm0 \n")\
- "addps %%xmm1, %%xmm2 \n"\
- "addps %%xmm3, %%xmm0 \n"\
- "addps %%xmm4, %%xmm2 \n"\
- mono("addps %%xmm2, %%xmm0 \n")\
- "movaps %%xmm0, (%0,%1) \n"\
- stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
- "add $16, %0 \n"\
- "jl 1b \n"\
- :"+&r"(i)\
- :"r"(samples[0]+len), "r"(matrix)\
- :XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
- "%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
- "memory"\
+#define MIX5(mono, stereo) \
+ __asm__ volatile ( \
+ "movss 0(%2), %%xmm5 \n" \
+ "movss 8(%2), %%xmm6 \n" \
+ "movss 24(%2), %%xmm7 \n" \
+ "shufps $0, %%xmm5, %%xmm5 \n" \
+ "shufps $0, %%xmm6, %%xmm6 \n" \
+ "shufps $0, %%xmm7, %%xmm7 \n" \
+ "1: \n" \
+ "movaps (%0, %1), %%xmm0 \n" \
+ "movaps 0x400(%0, %1), %%xmm1 \n" \
+ "movaps 0x800(%0, %1), %%xmm2 \n" \
+ "movaps 0xc00(%0, %1), %%xmm3 \n" \
+ "movaps 0x1000(%0, %1), %%xmm4 \n" \
+ "mulps %%xmm5, %%xmm0 \n" \
+ "mulps %%xmm6, %%xmm1 \n" \
+ "mulps %%xmm5, %%xmm2 \n" \
+ "mulps %%xmm7, %%xmm3 \n" \
+ "mulps %%xmm7, %%xmm4 \n" \
+ stereo("addps %%xmm1, %%xmm0 \n") \
+ "addps %%xmm1, %%xmm2 \n" \
+ "addps %%xmm3, %%xmm0 \n" \
+ "addps %%xmm4, %%xmm2 \n" \
+ mono("addps %%xmm2, %%xmm0 \n") \
+ "movaps %%xmm0, (%0, %1) \n" \
+ stereo("movaps %%xmm2, 0x400(%0, %1) \n") \
+ "add $16, %0 \n" \
+ "jl 1b \n" \
+ : "+&r"(i) \
+ : "r"(samples[0] + len), "r"(matrix) \
+ : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
+ "%xmm4", "%xmm5", "%xmm6", "%xmm7",) \
+ "memory" \
);
-#define MIX_MISC(stereo)\
- __asm__ volatile(\
- "1: \n"\
- "movaps (%3,%0), %%xmm0 \n"\
- stereo("movaps %%xmm0, %%xmm1 \n")\
- "mulps %%xmm4, %%xmm0 \n"\
- stereo("mulps %%xmm5, %%xmm1 \n")\
- "lea 1024(%3,%0), %1 \n"\
- "mov %5, %2 \n"\
- "2: \n"\
- "movaps (%1), %%xmm2 \n"\
- stereo("movaps %%xmm2, %%xmm3 \n")\
- "mulps (%4,%2), %%xmm2 \n"\
- stereo("mulps 16(%4,%2), %%xmm3 \n")\
- "addps %%xmm2, %%xmm0 \n"\
- stereo("addps %%xmm3, %%xmm1 \n")\
- "add $1024, %1 \n"\
- "add $32, %2 \n"\
- "jl 2b \n"\
- "movaps %%xmm0, (%3,%0) \n"\
- stereo("movaps %%xmm1, 1024(%3,%0) \n")\
- "add $16, %0 \n"\
- "jl 1b \n"\
- :"+&r"(i), "=&r"(j), "=&r"(k)\
- :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
- :"memory"\
+#define MIX_MISC(stereo) \
+ __asm__ volatile ( \
+ "1: \n" \
+ "movaps (%3, %0), %%xmm0 \n" \
+ stereo("movaps %%xmm0, %%xmm1 \n") \
+ "mulps %%xmm4, %%xmm0 \n" \
+ stereo("mulps %%xmm5, %%xmm1 \n") \
+ "lea 1024(%3, %0), %1 \n" \
+ "mov %5, %2 \n" \
+ "2: \n" \
+ "movaps (%1), %%xmm2 \n" \
+ stereo("movaps %%xmm2, %%xmm3 \n") \
+ "mulps (%4, %2), %%xmm2 \n" \
+ stereo("mulps 16(%4, %2), %%xmm3 \n") \
+ "addps %%xmm2, %%xmm0 \n" \
+ stereo("addps %%xmm3, %%xmm1 \n") \
+ "add $1024, %1 \n" \
+ "add $32, %2 \n" \
+ "jl 2b \n" \
+ "movaps %%xmm0, (%3, %0) \n" \
+ stereo("movaps %%xmm1, 1024(%3, %0) \n") \
+ "add $16, %0 \n" \
+ "jl 1b \n" \
+ : "+&r"(i), "=&r"(j), "=&r"(k) \
+ : "r"(samples[0] + len), "r"(matrix_simd + in_ch), \
+ "g"((intptr_t) - 32 * (in_ch - 1)) \
+ : "memory" \
);
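
MIX5 hardcodes a five-channel downmix in which both fronts share matrix[0][0], the center uses matrix[1][0] and both surrounds share matrix[3][0] (hence only three movss loads); MIX_MISC handles arbitrary matrices. A hypothetical scalar model of MIX5, with the channel planes 256 floats apart as the 0x400 offsets imply:

    static void mix5_scalar(float samples[5][256], float m00, float m10,
                            float m30, int mono)
    {
        int i;
        for (i = 0; i < 256; i++) {
            float l = samples[0][i] * m00 + samples[3][i] * m30;
            float r = samples[2][i] * m00 + samples[1][i] * m10 +
                      samples[4][i] * m30;
            if (mono) {
                samples[0][i] = l + r;  /* center counted once, as in the asm */
            } else {
                samples[0][i] = l + samples[1][i] * m10;
                samples[1][i] = r;
            }
        }
    }
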
static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2],
@@ -2295,19 +2310,19 @@ static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2],
} else {
DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
j = 2 * in_ch * sizeof(float);
- __asm__ volatile(
- "1: \n"
- "sub $8, %0 \n"
- "movss (%2,%0), %%xmm4 \n"
- "movss 4(%2,%0), %%xmm5 \n"
- "shufps $0, %%xmm4, %%xmm4 \n"
- "shufps $0, %%xmm5, %%xmm5 \n"
- "movaps %%xmm4, (%1,%0,4) \n"
- "movaps %%xmm5, 16(%1,%0,4) \n"
- "jg 1b \n"
- :"+&r"(j)
- :"r"(matrix_simd), "r"(matrix)
- :"memory"
+ __asm__ volatile (
+ "1: \n"
+ "sub $8, %0 \n"
+ "movss (%2, %0), %%xmm4 \n"
+ "movss 4(%2, %0), %%xmm5 \n"
+ "shufps $0, %%xmm4, %%xmm4 \n"
+ "shufps $0, %%xmm5, %%xmm5 \n"
+ "movaps %%xmm4, (%1, %0, 4) \n"
+ "movaps %%xmm5, 16(%1, %0, 4) \n"
+ "jg 1b \n"
+ : "+&r"(j)
+ : "r"(matrix_simd), "r"(matrix)
+ : "memory"
);
if (out_ch == 2) {
MIX_MISC(IF1);
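
The asm just above only broadcasts each input channel's two matrix coefficients across a 4-float lane so MIX_MISC can mulps them directly; in scalar terms, using the locals already in scope (a sketch):

    for (j = 0; j < in_ch; j++) {
        int k;
        for (k = 0; k < 4; k++) {
            matrix_simd[j][0][k] = matrix[j][0];
            matrix_simd[j][1][k] = matrix[j][1];
        }
    }
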
@@ -2321,20 +2336,20 @@ static void vector_fmul_3dnow(float *dst, const float *src0, const float *src1,
int len)
{
x86_reg i = (len - 4) * 4;
- __asm__ volatile(
- "1: \n\t"
- "movq (%2,%0), %%mm0 \n\t"
- "movq 8(%2,%0), %%mm1 \n\t"
- "pfmul (%3,%0), %%mm0 \n\t"
- "pfmul 8(%3,%0), %%mm1 \n\t"
- "movq %%mm0, (%1,%0) \n\t"
- "movq %%mm1, 8(%1,%0) \n\t"
- "sub $16, %0 \n\t"
- "jge 1b \n\t"
- "femms \n\t"
- :"+r"(i)
- :"r"(dst), "r"(src0), "r"(src1)
- :"memory"
+ __asm__ volatile (
+ "1: \n\t"
+ "movq (%2, %0), %%mm0 \n\t"
+ "movq 8(%2, %0), %%mm1 \n\t"
+ "pfmul (%3, %0), %%mm0 \n\t"
+ "pfmul 8(%3, %0), %%mm1 \n\t"
+ "movq %%mm0, (%1, %0) \n\t"
+ "movq %%mm1, 8(%1, %0) \n\t"
+ "sub $16, %0 \n\t"
+ "jge 1b \n\t"
+ "femms \n\t"
+ : "+r"(i)
+ : "r"(dst), "r"(src0), "r"(src1)
+ : "memory"
);
}
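
Loop mechanics aside (the asm counts i down in bytes, 16 per iteration), this is an elementwise product:

    static void vector_fmul_scalar(float *dst, const float *src0,
                                   const float *src1, int len)
    {
        int i;
        for (i = 0; i < len; i++)
            dst[i] = src0[i] * src1[i];
    }
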
@@ -2342,19 +2357,19 @@ static void vector_fmul_sse(float *dst, const float *src0, const float *src1,
int len)
{
x86_reg i = (len - 8) * 4;
- __asm__ volatile(
- "1: \n\t"
- "movaps (%2,%0), %%xmm0 \n\t"
- "movaps 16(%2,%0), %%xmm1 \n\t"
- "mulps (%3,%0), %%xmm0 \n\t"
- "mulps 16(%3,%0), %%xmm1 \n\t"
- "movaps %%xmm0, (%1,%0) \n\t"
- "movaps %%xmm1, 16(%1,%0) \n\t"
- "sub $32, %0 \n\t"
- "jge 1b \n\t"
- :"+r"(i)
- :"r"(dst), "r"(src0), "r"(src1)
- :"memory"
+ __asm__ volatile (
+ "1: \n\t"
+ "movaps (%2, %0), %%xmm0 \n\t"
+ "movaps 16(%2, %0), %%xmm1 \n\t"
+ "mulps (%3, %0), %%xmm0 \n\t"
+ "mulps 16(%3, %0), %%xmm1 \n\t"
+ "movaps %%xmm0, (%1, %0) \n\t"
+ "movaps %%xmm1, 16(%1, %0) \n\t"
+ "sub $32, %0 \n\t"
+ "jge 1b \n\t"
+ : "+r"(i)
+ : "r"(dst), "r"(src0), "r"(src1)
+ : "memory"
);
}
@@ -2362,42 +2377,42 @@ static void vector_fmul_reverse_3dnow2(float *dst, const float *src0,
const float *src1, int len)
{
x86_reg i = len * 4 - 16;
- __asm__ volatile(
- "1: \n\t"
- "pswapd 8(%1), %%mm0 \n\t"
- "pswapd (%1), %%mm1 \n\t"
- "pfmul (%3,%0), %%mm0 \n\t"
- "pfmul 8(%3,%0), %%mm1 \n\t"
- "movq %%mm0, (%2,%0) \n\t"
- "movq %%mm1, 8(%2,%0) \n\t"
- "add $16, %1 \n\t"
- "sub $16, %0 \n\t"
- "jge 1b \n\t"
- :"+r"(i), "+r"(src1)
- :"r"(dst), "r"(src0)
+ __asm__ volatile (
+ "1: \n\t"
+ "pswapd 8(%1), %%mm0 \n\t"
+ "pswapd (%1), %%mm1 \n\t"
+ "pfmul (%3, %0), %%mm0 \n\t"
+ "pfmul 8(%3, %0), %%mm1 \n\t"
+ "movq %%mm0, (%2, %0) \n\t"
+ "movq %%mm1, 8(%2, %0) \n\t"
+ "add $16, %1 \n\t"
+ "sub $16, %0 \n\t"
+ "jge 1b \n\t"
+ : "+r"(i), "+r"(src1)
+ : "r"(dst), "r"(src0)
);
- __asm__ volatile("femms");
+ __asm__ volatile ("femms");
}
static void vector_fmul_reverse_sse(float *dst, const float *src0,
const float *src1, int len)
{
x86_reg i = len * 4 - 32;
- __asm__ volatile(
- "1: \n\t"
- "movaps 16(%1), %%xmm0 \n\t"
- "movaps (%1), %%xmm1 \n\t"
- "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
- "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
- "mulps (%3,%0), %%xmm0 \n\t"
- "mulps 16(%3,%0), %%xmm1 \n\t"
- "movaps %%xmm0, (%2,%0) \n\t"
- "movaps %%xmm1, 16(%2,%0) \n\t"
- "add $32, %1 \n\t"
- "sub $32, %0 \n\t"
- "jge 1b \n\t"
- :"+r"(i), "+r"(src1)
- :"r"(dst), "r"(src0)
+ __asm__ volatile (
+ "1: \n\t"
+ "movaps 16(%1), %%xmm0 \n\t"
+ "movaps (%1), %%xmm1 \n\t"
+ "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
+ "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
+ "mulps (%3, %0), %%xmm0 \n\t"
+ "mulps 16(%3, %0), %%xmm1 \n\t"
+ "movaps %%xmm0, (%2, %0) \n\t"
+ "movaps %%xmm1, 16(%2, %0) \n\t"
+ "add $32, %1 \n\t"
+ "sub $32, %0 \n\t"
+ "jge 1b \n\t"
+ : "+r"(i), "+r"(src1)
+ : "r"(dst), "r"(src0)
);
}
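
Both reverse variants flip src1 in registers (pswapd, or shufps $0x1b), i.e.:

    static void vector_fmul_reverse_scalar(float *dst, const float *src0,
                                           const float *src1, int len)
    {
        int i;
        for (i = 0; i < len; i++)
            dst[i] = src0[i] * src1[len - 1 - i];
    }
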
@@ -2405,44 +2420,44 @@ static void vector_fmul_add_3dnow(float *dst, const float *src0,
const float *src1, const float *src2, int len)
{
x86_reg i = (len - 4) * 4;
- __asm__ volatile(
- "1: \n\t"
- "movq (%2,%0), %%mm0 \n\t"
- "movq 8(%2,%0), %%mm1 \n\t"
- "pfmul (%3,%0), %%mm0 \n\t"
- "pfmul 8(%3,%0), %%mm1 \n\t"
- "pfadd (%4,%0), %%mm0 \n\t"
- "pfadd 8(%4,%0), %%mm1 \n\t"
- "movq %%mm0, (%1,%0) \n\t"
- "movq %%mm1, 8(%1,%0) \n\t"
- "sub $16, %0 \n\t"
- "jge 1b \n\t"
- :"+r"(i)
- :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
- :"memory"
+ __asm__ volatile (
+ "1: \n\t"
+ "movq (%2, %0), %%mm0 \n\t"
+ "movq 8(%2, %0), %%mm1 \n\t"
+ "pfmul (%3, %0), %%mm0 \n\t"
+ "pfmul 8(%3, %0), %%mm1 \n\t"
+ "pfadd (%4, %0), %%mm0 \n\t"
+ "pfadd 8(%4, %0), %%mm1 \n\t"
+ "movq %%mm0, (%1, %0) \n\t"
+ "movq %%mm1, 8(%1, %0) \n\t"
+ "sub $16, %0 \n\t"
+ "jge 1b \n\t"
+ : "+r"(i)
+ : "r"(dst), "r"(src0), "r"(src1), "r"(src2)
+ : "memory"
);
- __asm__ volatile("femms");
+ __asm__ volatile ("femms");
}
static void vector_fmul_add_sse(float *dst, const float *src0,
const float *src1, const float *src2, int len)
{
x86_reg i = (len - 8) * 4;
- __asm__ volatile(
- "1: \n\t"
- "movaps (%2,%0), %%xmm0 \n\t"
- "movaps 16(%2,%0), %%xmm1 \n\t"
- "mulps (%3,%0), %%xmm0 \n\t"
- "mulps 16(%3,%0), %%xmm1 \n\t"
- "addps (%4,%0), %%xmm0 \n\t"
- "addps 16(%4,%0), %%xmm1 \n\t"
- "movaps %%xmm0, (%1,%0) \n\t"
- "movaps %%xmm1, 16(%1,%0) \n\t"
- "sub $32, %0 \n\t"
- "jge 1b \n\t"
- :"+r"(i)
- :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
- :"memory"
+ __asm__ volatile (
+ "1: \n\t"
+ "movaps (%2, %0), %%xmm0 \n\t"
+ "movaps 16(%2, %0), %%xmm1 \n\t"
+ "mulps (%3, %0), %%xmm0 \n\t"
+ "mulps 16(%3, %0), %%xmm1 \n\t"
+ "addps (%4, %0), %%xmm0 \n\t"
+ "addps 16(%4, %0), %%xmm1 \n\t"
+ "movaps %%xmm0, (%1, %0) \n\t"
+ "movaps %%xmm1, 16(%1, %0) \n\t"
+ "sub $32, %0 \n\t"
+ "jge 1b \n\t"
+ : "+r"(i)
+ : "r"(dst), "r"(src0), "r"(src1), "r"(src2)
+ : "memory"
);
}
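
With pfadd/addps folding in src2, this is a multiply-accumulate over the arrays:

    static void vector_fmul_add_scalar(float *dst, const float *src0,
                                       const float *src1, const float *src2,
                                       int len)
    {
        int i;
        for (i = 0; i < len; i++)
            dst[i] = src0[i] * src1[i] + src2[i];
    }
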
@@ -2453,29 +2468,29 @@ static void vector_fmul_window_3dnow2(float *dst, const float *src0,
{
x86_reg i = -len * 4;
x86_reg j = len * 4 - 8;
- __asm__ volatile(
- "1: \n"
- "pswapd (%5,%1), %%mm1 \n"
- "movq (%5,%0), %%mm0 \n"
- "pswapd (%4,%1), %%mm5 \n"
- "movq (%3,%0), %%mm4 \n"
- "movq %%mm0, %%mm2 \n"
- "movq %%mm1, %%mm3 \n"
- "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
- "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
- "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
- "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
- "pfadd %%mm3, %%mm2 \n"
- "pfsub %%mm0, %%mm1 \n"
- "pswapd %%mm2, %%mm2 \n"
- "movq %%mm1, (%2,%0) \n"
- "movq %%mm2, (%2,%1) \n"
- "sub $8, %1 \n"
- "add $8, %0 \n"
- "jl 1b \n"
- "femms \n"
- :"+r"(i), "+r"(j)
- :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
+ __asm__ volatile (
+ "1: \n"
+ "pswapd (%5, %1), %%mm1 \n"
+ "movq (%5, %0), %%mm0 \n"
+ "pswapd (%4, %1), %%mm5 \n"
+ "movq (%3, %0), %%mm4 \n"
+ "movq %%mm0, %%mm2 \n"
+ "movq %%mm1, %%mm3 \n"
+ "pfmul %%mm4, %%mm2 \n" // src0[len + i] * win[len + i]
+ "pfmul %%mm5, %%mm3 \n" // src1[j] * win[len + j]
+ "pfmul %%mm4, %%mm1 \n" // src0[len + i] * win[len + j]
+ "pfmul %%mm5, %%mm0 \n" // src1[j] * win[len + i]
+ "pfadd %%mm3, %%mm2 \n"
+ "pfsub %%mm0, %%mm1 \n"
+ "pswapd %%mm2, %%mm2 \n"
+ "movq %%mm1, (%2, %0) \n"
+ "movq %%mm2, (%2, %1) \n"
+ "sub $8, %1 \n"
+ "add $8, %0 \n"
+ "jl 1b \n"
+ "femms \n"
+ : "+r"(i), "+r"(j)
+ : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
);
}
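
Per the operand comments, the window kernels walk i up from -len and j down from len - 1 at once, producing both halves of the overlap-add. A scalar model matching the C fallback (a sketch):

    static void vector_fmul_window_scalar(float *dst, const float *src0,
                                          const float *src1, const float *win,
                                          int len)
    {
        int i, j;
        dst  += len;
        win  += len;
        src0 += len;
        for (i = -len, j = len - 1; i < 0; i++, j--) {
            float s0 = src0[i], s1 = src1[j];
            float wi = win[i],  wj = win[j];
            dst[i] = s0 * wj - s1 * wi;
            dst[j] = s0 * wi + s1 * wj;
        }
    }
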
@@ -2484,30 +2499,30 @@ static void vector_fmul_window_sse(float *dst, const float *src0,
{
x86_reg i = -len * 4;
x86_reg j = len * 4 - 16;
- __asm__ volatile(
- "1: \n"
- "movaps (%5,%1), %%xmm1 \n"
- "movaps (%5,%0), %%xmm0 \n"
- "movaps (%4,%1), %%xmm5 \n"
- "movaps (%3,%0), %%xmm4 \n"
- "shufps $0x1b, %%xmm1, %%xmm1 \n"
- "shufps $0x1b, %%xmm5, %%xmm5 \n"
- "movaps %%xmm0, %%xmm2 \n"
- "movaps %%xmm1, %%xmm3 \n"
- "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
- "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
- "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
- "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
- "addps %%xmm3, %%xmm2 \n"
- "subps %%xmm0, %%xmm1 \n"
- "shufps $0x1b, %%xmm2, %%xmm2 \n"
- "movaps %%xmm1, (%2,%0) \n"
- "movaps %%xmm2, (%2,%1) \n"
- "sub $16, %1 \n"
- "add $16, %0 \n"
- "jl 1b \n"
- :"+r"(i), "+r"(j)
- :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
+ __asm__ volatile (
+ "1: \n"
+ "movaps (%5, %1), %%xmm1 \n"
+ "movaps (%5, %0), %%xmm0 \n"
+ "movaps (%4, %1), %%xmm5 \n"
+ "movaps (%3, %0), %%xmm4 \n"
+ "shufps $0x1b, %%xmm1, %%xmm1 \n"
+ "shufps $0x1b, %%xmm5, %%xmm5 \n"
+ "movaps %%xmm0, %%xmm2 \n"
+ "movaps %%xmm1, %%xmm3 \n"
+ "mulps %%xmm4, %%xmm2 \n" // src0[len + i] * win[len + i]
+ "mulps %%xmm5, %%xmm3 \n" // src1[j] * win[len + j]
+ "mulps %%xmm4, %%xmm1 \n" // src0[len + i] * win[len + j]
+ "mulps %%xmm5, %%xmm0 \n" // src1[j] * win[len + i]
+ "addps %%xmm3, %%xmm2 \n"
+ "subps %%xmm0, %%xmm1 \n"
+ "shufps $0x1b, %%xmm2, %%xmm2 \n"
+ "movaps %%xmm1, (%2, %0) \n"
+ "movaps %%xmm2, (%2, %1) \n"
+ "sub $16, %1 \n"
+ "add $16, %0 \n"
+ "jl 1b \n"
+ : "+r"(i), "+r"(j)
+ : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
);
}
#endif /* HAVE_6REGS */
@@ -2516,33 +2531,33 @@ static void vector_clipf_sse(float *dst, const float *src,
float min, float max, int len)
{
x86_reg i = (len - 16) * 4;
- __asm__ volatile(
- "movss %3, %%xmm4 \n"
- "movss %4, %%xmm5 \n"
- "shufps $0, %%xmm4, %%xmm4 \n"
- "shufps $0, %%xmm5, %%xmm5 \n"
- "1: \n\t"
- "movaps (%2,%0), %%xmm0 \n\t" // 3/1 on intel
- "movaps 16(%2,%0), %%xmm1 \n\t"
- "movaps 32(%2,%0), %%xmm2 \n\t"
- "movaps 48(%2,%0), %%xmm3 \n\t"
- "maxps %%xmm4, %%xmm0 \n\t"
- "maxps %%xmm4, %%xmm1 \n\t"
- "maxps %%xmm4, %%xmm2 \n\t"
- "maxps %%xmm4, %%xmm3 \n\t"
- "minps %%xmm5, %%xmm0 \n\t"
- "minps %%xmm5, %%xmm1 \n\t"
- "minps %%xmm5, %%xmm2 \n\t"
- "minps %%xmm5, %%xmm3 \n\t"
- "movaps %%xmm0, (%1,%0) \n\t"
- "movaps %%xmm1, 16(%1,%0) \n\t"
- "movaps %%xmm2, 32(%1,%0) \n\t"
- "movaps %%xmm3, 48(%1,%0) \n\t"
- "sub $64, %0 \n\t"
- "jge 1b \n\t"
- :"+&r"(i)
- :"r"(dst), "r"(src), "m"(min), "m"(max)
- :"memory"
+ __asm__ volatile (
+ "movss %3, %%xmm4 \n\t"
+ "movss %4, %%xmm5 \n\t"
+ "shufps $0, %%xmm4, %%xmm4 \n\t"
+ "shufps $0, %%xmm5, %%xmm5 \n\t"
+ "1: \n\t"
+ "movaps (%2, %0), %%xmm0 \n\t" // 3/1 on intel
+ "movaps 16(%2, %0), %%xmm1 \n\t"
+ "movaps 32(%2, %0), %%xmm2 \n\t"
+ "movaps 48(%2, %0), %%xmm3 \n\t"
+ "maxps %%xmm4, %%xmm0 \n\t"
+ "maxps %%xmm4, %%xmm1 \n\t"
+ "maxps %%xmm4, %%xmm2 \n\t"
+ "maxps %%xmm4, %%xmm3 \n\t"
+ "minps %%xmm5, %%xmm0 \n\t"
+ "minps %%xmm5, %%xmm1 \n\t"
+ "minps %%xmm5, %%xmm2 \n\t"
+ "minps %%xmm5, %%xmm3 \n\t"
+ "movaps %%xmm0, (%1, %0) \n\t"
+ "movaps %%xmm1, 16(%1, %0) \n\t"
+ "movaps %%xmm2, 32(%1, %0) \n\t"
+ "movaps %%xmm3, 48(%1, %0) \n\t"
+ "sub $64, %0 \n\t"
+ "jge 1b \n\t"
+ : "+&r"(i)
+ : "r"(dst), "r"(src), "m"(min), "m"(max)
+ : "memory"
);
}
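
The maxps/minps ladder clamps sixteen floats per iteration; elementwise it reduces to:

    static void vector_clipf_scalar(float *dst, const float *src,
                                    float min, float max, int len)
    {
        int i;
        for (i = 0; i < len; i++) {
            float v = src[i];
            dst[i] = v < min ? min : v > max ? max : v;
        }
    }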