author     Aurelien Jacobs <aurel@gnuage.org>   2008-02-25 23:14:22 +0000
committer  Aurelien Jacobs <aurel@gnuage.org>   2008-02-25 23:14:22 +0000
commit     97d1d009e21f1a8ed33f767c31f02ee7d7524e9c (patch)
tree       d70ed53a77ac7c02dd888f32d0319a1cd7e483f7 /libavcodec/i386
parent     4847a997aa5843fa629f6db6df01bf319110a396 (diff)
split encoding part of dsputil_mmx into its own file
Originally committed as revision 12223 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/i386')
-rw-r--r--   libavcodec/i386/dsputil_mmx.c      1412
-rw-r--r--   libavcodec/i386/dsputil_mmx.h         9
-rw-r--r--   libavcodec/i386/dsputilenc_mmx.c   1422
3 files changed, 1434 insertions, 1409 deletions
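
In outline, the encoder-only DSP routines leave dsputil_mmx.c for the new dsputilenc_mmx.c, and dsputil_init_mmx() now reaches them through a single entry point. The sketch below only illustrates that wiring, using names visible in the hunks that follow (dsputilenc_init_mmx, ENABLE_ENCODERS, get_pixels_mmx, ...); the body of the new init function is not shown in this excerpt, so its exact contents are an assumption, not the verbatim FFmpeg sources.

/* dsputil_mmx.h now declares the encoder-side init: */
void dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx);

/* dsputilenc_mmx.c collects the encoder-only routines and is assumed to
 * register them itself, e.g.: */
void dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    c->get_pixels  = get_pixels_mmx;    /* moved from dsputil_mmx.c */
    c->diff_pixels = diff_pixels_mmx;
    c->pix_sum     = pix_sum16_mmx;
    /* ... remaining CONFIG_ENCODERS function pointers ... */
}

/* dsputil_mmx.c keeps the decoder setup and delegates: */
void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    /* ... decoder-side function pointers ... */
    if (ENABLE_ENCODERS)                /* compile-time flag */
        dsputilenc_init_mmx(c, avctx);
}
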
diff --git a/libavcodec/i386/dsputil_mmx.c b/libavcodec/i386/dsputil_mmx.c
index ff5b6c63ff..30e938312d 100644
--- a/libavcodec/i386/dsputil_mmx.c
+++ b/libavcodec/i386/dsputil_mmx.c
@@ -74,11 +74,6 @@ DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };
#define JUMPALIGN() asm volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) asm volatile ("pxor %%" #regd ", %%" #regd ::)
-#define MOVQ_WONE(regd) \
- asm volatile ( \
- "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
- "psrlw $15, %%" #regd ::)
-
#define MOVQ_BFE(regd) \
asm volatile ( \
"pcmpeqd %%" #regd ", %%" #regd " \n\t"\
@@ -220,65 +215,6 @@ DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };
/***********************************/
/* standard MMX */
-#ifdef CONFIG_ENCODERS
-static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
-{
- asm volatile(
- "mov $-128, %%"REG_a" \n\t"
- "pxor %%mm7, %%mm7 \n\t"
- ASMALIGN(4)
- "1: \n\t"
- "movq (%0), %%mm0 \n\t"
- "movq (%0, %2), %%mm2 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "movq %%mm2, %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpckhbw %%mm7, %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpckhbw %%mm7, %%mm3 \n\t"
- "movq %%mm0, (%1, %%"REG_a") \n\t"
- "movq %%mm1, 8(%1, %%"REG_a") \n\t"
- "movq %%mm2, 16(%1, %%"REG_a") \n\t"
- "movq %%mm3, 24(%1, %%"REG_a") \n\t"
- "add %3, %0 \n\t"
- "add $32, %%"REG_a" \n\t"
- "js 1b \n\t"
- : "+r" (pixels)
- : "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
- : "%"REG_a
- );
-}
-
-static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
-{
- asm volatile(
- "pxor %%mm7, %%mm7 \n\t"
- "mov $-128, %%"REG_a" \n\t"
- ASMALIGN(4)
- "1: \n\t"
- "movq (%0), %%mm0 \n\t"
- "movq (%1), %%mm2 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "movq %%mm2, %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpckhbw %%mm7, %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpckhbw %%mm7, %%mm3 \n\t"
- "psubw %%mm2, %%mm0 \n\t"
- "psubw %%mm3, %%mm1 \n\t"
- "movq %%mm0, (%2, %%"REG_a") \n\t"
- "movq %%mm1, 8(%2, %%"REG_a") \n\t"
- "add %3, %0 \n\t"
- "add %3, %1 \n\t"
- "add $16, %%"REG_a" \n\t"
- "jnz 1b \n\t"
- : "+r" (s1), "+r" (s2)
- : "r" (block+64), "r" ((long)stride)
- : "%"REG_a
- );
-}
-#endif //CONFIG_ENCODERS
-
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
const DCTELEM *p;
@@ -544,46 +480,6 @@ static void clear_blocks_mmx(DCTELEM *blocks)
);
}
-#ifdef CONFIG_ENCODERS
-static int pix_sum16_mmx(uint8_t * pix, int line_size){
- const int h=16;
- int sum;
- long index= -line_size*h;
-
- asm volatile(
- "pxor %%mm7, %%mm7 \n\t"
- "pxor %%mm6, %%mm6 \n\t"
- "1: \n\t"
- "movq (%2, %1), %%mm0 \n\t"
- "movq (%2, %1), %%mm1 \n\t"
- "movq 8(%2, %1), %%mm2 \n\t"
- "movq 8(%2, %1), %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpckhbw %%mm7, %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpckhbw %%mm7, %%mm3 \n\t"
- "paddw %%mm0, %%mm1 \n\t"
- "paddw %%mm2, %%mm3 \n\t"
- "paddw %%mm1, %%mm3 \n\t"
- "paddw %%mm3, %%mm6 \n\t"
- "add %3, %1 \n\t"
- " js 1b \n\t"
- "movq %%mm6, %%mm5 \n\t"
- "psrlq $32, %%mm6 \n\t"
- "paddw %%mm5, %%mm6 \n\t"
- "movq %%mm6, %%mm5 \n\t"
- "psrlq $16, %%mm6 \n\t"
- "paddw %%mm5, %%mm6 \n\t"
- "movd %%mm6, %0 \n\t"
- "andl $0xFFFF, %0 \n\t"
- : "=&r" (sum), "+r" (index)
- : "r" (pix - index), "r" ((long)line_size)
- );
-
- return sum;
-}
-#endif //CONFIG_ENCODERS
-
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
long i=0;
asm volatile(
@@ -800,791 +696,6 @@ static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
}
}
-#ifdef CONFIG_ENCODERS
-static int pix_norm1_mmx(uint8_t *pix, int line_size) {
- int tmp;
- asm volatile (
- "movl $16,%%ecx\n"
- "pxor %%mm0,%%mm0\n"
- "pxor %%mm7,%%mm7\n"
- "1:\n"
- "movq (%0),%%mm2\n" /* mm2 = pix[0-7] */
- "movq 8(%0),%%mm3\n" /* mm3 = pix[8-15] */
-
- "movq %%mm2,%%mm1\n" /* mm1 = mm2 = pix[0-7] */
-
- "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
- "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
-
- "movq %%mm3,%%mm4\n" /* mm4 = mm3 = pix[8-15] */
- "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
- "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
-
- "pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
- "pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */
-
- "pmaddwd %%mm3,%%mm3\n"
- "pmaddwd %%mm4,%%mm4\n"
-
- "paddd %%mm1,%%mm2\n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
- pix2^2+pix3^2+pix6^2+pix7^2) */
- "paddd %%mm3,%%mm4\n"
- "paddd %%mm2,%%mm7\n"
-
- "add %2, %0\n"
- "paddd %%mm4,%%mm7\n"
- "dec %%ecx\n"
- "jnz 1b\n"
-
- "movq %%mm7,%%mm1\n"
- "psrlq $32, %%mm7\n" /* shift hi dword to lo */
- "paddd %%mm7,%%mm1\n"
- "movd %%mm1,%1\n"
- : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
- return tmp;
-}
-
-static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- int tmp;
- asm volatile (
- "movl %4,%%ecx\n"
- "shr $1,%%ecx\n"
- "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
- "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
- "1:\n"
- "movq (%0),%%mm1\n" /* mm1 = pix1[0][0-7] */
- "movq (%1),%%mm2\n" /* mm2 = pix2[0][0-7] */
- "movq (%0,%3),%%mm3\n" /* mm3 = pix1[1][0-7] */
- "movq (%1,%3),%%mm4\n" /* mm4 = pix2[1][0-7] */
-
- /* todo: mm1-mm2, mm3-mm4 */
- /* algo: subtract mm1 from mm2 with saturation and vice versa */
- /* OR the results to get absolute difference */
- "movq %%mm1,%%mm5\n"
- "movq %%mm3,%%mm6\n"
- "psubusb %%mm2,%%mm1\n"
- "psubusb %%mm4,%%mm3\n"
- "psubusb %%mm5,%%mm2\n"
- "psubusb %%mm6,%%mm4\n"
-
- "por %%mm1,%%mm2\n"
- "por %%mm3,%%mm4\n"
-
- /* now convert to 16-bit vectors so we can square them */
- "movq %%mm2,%%mm1\n"
- "movq %%mm4,%%mm3\n"
-
- "punpckhbw %%mm0,%%mm2\n"
- "punpckhbw %%mm0,%%mm4\n"
- "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
- "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
-
- "pmaddwd %%mm2,%%mm2\n"
- "pmaddwd %%mm4,%%mm4\n"
- "pmaddwd %%mm1,%%mm1\n"
- "pmaddwd %%mm3,%%mm3\n"
-
- "lea (%0,%3,2), %0\n" /* pix1 += 2*line_size */
- "lea (%1,%3,2), %1\n" /* pix2 += 2*line_size */
-
- "paddd %%mm2,%%mm1\n"
- "paddd %%mm4,%%mm3\n"
- "paddd %%mm1,%%mm7\n"
- "paddd %%mm3,%%mm7\n"
-
- "decl %%ecx\n"
- "jnz 1b\n"
-
- "movq %%mm7,%%mm1\n"
- "psrlq $32, %%mm7\n" /* shift hi dword to lo */
- "paddd %%mm7,%%mm1\n"
- "movd %%mm1,%2\n"
- : "+r" (pix1), "+r" (pix2), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp;
-}
-
-static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- int tmp;
- asm volatile (
- "movl %4,%%ecx\n"
- "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
- "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
- "1:\n"
- "movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
- "movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
- "movq 8(%0),%%mm3\n" /* mm3 = pix1[8-15] */
- "movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */
-
- /* todo: mm1-mm2, mm3-mm4 */
- /* algo: subtract mm1 from mm2 with saturation and vice versa */
- /* OR the results to get absolute difference */
- "movq %%mm1,%%mm5\n"
- "movq %%mm3,%%mm6\n"
- "psubusb %%mm2,%%mm1\n"
- "psubusb %%mm4,%%mm3\n"
- "psubusb %%mm5,%%mm2\n"
- "psubusb %%mm6,%%mm4\n"
-
- "por %%mm1,%%mm2\n"
- "por %%mm3,%%mm4\n"
-
- /* now convert to 16-bit vectors so we can square them */
- "movq %%mm2,%%mm1\n"
- "movq %%mm4,%%mm3\n"
-
- "punpckhbw %%mm0,%%mm2\n"
- "punpckhbw %%mm0,%%mm4\n"
- "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
- "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
-
- "pmaddwd %%mm2,%%mm2\n"
- "pmaddwd %%mm4,%%mm4\n"
- "pmaddwd %%mm1,%%mm1\n"
- "pmaddwd %%mm3,%%mm3\n"
-
- "add %3,%0\n"
- "add %3,%1\n"
-
- "paddd %%mm2,%%mm1\n"
- "paddd %%mm4,%%mm3\n"
- "paddd %%mm1,%%mm7\n"
- "paddd %%mm3,%%mm7\n"
-
- "decl %%ecx\n"
- "jnz 1b\n"
-
- "movq %%mm7,%%mm1\n"
- "psrlq $32, %%mm7\n" /* shift hi dword to lo */
- "paddd %%mm7,%%mm1\n"
- "movd %%mm1,%2\n"
- : "+r" (pix1), "+r" (pix2), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp;
-}
-
-static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- int tmp;
- asm volatile (
- "shr $1,%2\n"
- "pxor %%xmm0,%%xmm0\n" /* mm0 = 0 */
- "pxor %%xmm7,%%xmm7\n" /* mm7 holds the sum */
- "1:\n"
- "movdqu (%0),%%xmm1\n" /* mm1 = pix1[0][0-15] */
- "movdqu (%1),%%xmm2\n" /* mm2 = pix2[0][0-15] */
- "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */
- "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */
-
- /* todo: mm1-mm2, mm3-mm4 */
- /* algo: subtract mm1 from mm2 with saturation and vice versa */
- /* OR the results to get absolute difference */
- "movdqa %%xmm1,%%xmm5\n"
- "movdqa %%xmm3,%%xmm6\n"
- "psubusb %%xmm2,%%xmm1\n"
- "psubusb %%xmm4,%%xmm3\n"
- "psubusb %%xmm5,%%xmm2\n"
- "psubusb %%xmm6,%%xmm4\n"
-
- "por %%xmm1,%%xmm2\n"
- "por %%xmm3,%%xmm4\n"
-
- /* now convert to 16-bit vectors so we can square them */
- "movdqa %%xmm2,%%xmm1\n"
- "movdqa %%xmm4,%%xmm3\n"
-
- "punpckhbw %%xmm0,%%xmm2\n"
- "punpckhbw %%xmm0,%%xmm4\n"
- "punpcklbw %%xmm0,%%xmm1\n" /* mm1 now spread over (mm1,mm2) */
- "punpcklbw %%xmm0,%%xmm3\n" /* mm4 now spread over (mm3,mm4) */
-
- "pmaddwd %%xmm2,%%xmm2\n"
- "pmaddwd %%xmm4,%%xmm4\n"
- "pmaddwd %%xmm1,%%xmm1\n"
- "pmaddwd %%xmm3,%%xmm3\n"
-
- "lea (%0,%4,2), %0\n" /* pix1 += 2*line_size */
- "lea (%1,%4,2), %1\n" /* pix2 += 2*line_size */
-
- "paddd %%xmm2,%%xmm1\n"
- "paddd %%xmm4,%%xmm3\n"
- "paddd %%xmm1,%%xmm7\n"
- "paddd %%xmm3,%%xmm7\n"
-
- "decl %2\n"
- "jnz 1b\n"
-
- "movdqa %%xmm7,%%xmm1\n"
- "psrldq $8, %%xmm7\n" /* shift hi qword to lo */
- "paddd %%xmm1,%%xmm7\n"
- "movdqa %%xmm7,%%xmm1\n"
- "psrldq $4, %%xmm7\n" /* shift hi dword to lo */
- "paddd %%xmm1,%%xmm7\n"
- "movd %%xmm7,%3\n"
- : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
- : "r" ((long)line_size));
- return tmp;
-}
-
-static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
- int tmp;
- asm volatile (
- "movl %3,%%ecx\n"
- "pxor %%mm7,%%mm7\n"
- "pxor %%mm6,%%mm6\n"
-
- "movq (%0),%%mm0\n"
- "movq %%mm0, %%mm1\n"
- "psllq $8, %%mm0\n"
- "psrlq $8, %%mm1\n"
- "psrlq $8, %%mm0\n"
- "movq %%mm0, %%mm2\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7,%%mm0\n"
- "punpcklbw %%mm7,%%mm1\n"
- "punpckhbw %%mm7,%%mm2\n"
- "punpckhbw %%mm7,%%mm3\n"
- "psubw %%mm1, %%mm0\n"
- "psubw %%mm3, %%mm2\n"
-
- "add %2,%0\n"
-
- "movq (%0),%%mm4\n"
- "movq %%mm4, %%mm1\n"
- "psllq $8, %%mm4\n"
- "psrlq $8, %%mm1\n"
- "psrlq $8, %%mm4\n"
- "movq %%mm4, %%mm5\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7,%%mm4\n"
- "punpcklbw %%mm7,%%mm1\n"
- "punpckhbw %%mm7,%%mm5\n"
- "punpckhbw %%mm7,%%mm3\n"
- "psubw %%mm1, %%mm4\n"
- "psubw %%mm3, %%mm5\n"
- "psubw %%mm4, %%mm0\n"
- "psubw %%mm5, %%mm2\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm0, %%mm3\n\t"
- "pcmpgtw %%mm2, %%mm1\n\t"
- "pxor %%mm3, %%mm0\n"
- "pxor %%mm1, %%mm2\n"
- "psubw %%mm3, %%mm0\n"
- "psubw %%mm1, %%mm2\n"
- "paddw %%mm0, %%mm2\n"
- "paddw %%mm2, %%mm6\n"
-
- "add %2,%0\n"
- "1:\n"
-
- "movq (%0),%%mm0\n"
- "movq %%mm0, %%mm1\n"
- "psllq $8, %%mm0\n"
- "psrlq $8, %%mm1\n"
- "psrlq $8, %%mm0\n"
- "movq %%mm0, %%mm2\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7,%%mm0\n"
- "punpcklbw %%mm7,%%mm1\n"
- "punpckhbw %%mm7,%%mm2\n"
- "punpckhbw %%mm7,%%mm3\n"
- "psubw %%mm1, %%mm0\n"
- "psubw %%mm3, %%mm2\n"
- "psubw %%mm0, %%mm4\n"
- "psubw %%mm2, %%mm5\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm4, %%mm3\n\t"
- "pcmpgtw %%mm5, %%mm1\n\t"
- "pxor %%mm3, %%mm4\n"
- "pxor %%mm1, %%mm5\n"
- "psubw %%mm3, %%mm4\n"
- "psubw %%mm1, %%mm5\n"
- "paddw %%mm4, %%mm5\n"
- "paddw %%mm5, %%mm6\n"
-
- "add %2,%0\n"
-
- "movq (%0),%%mm4\n"
- "movq %%mm4, %%mm1\n"
- "psllq $8, %%mm4\n"
- "psrlq $8, %%mm1\n"
- "psrlq $8, %%mm4\n"
- "movq %%mm4, %%mm5\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7,%%mm4\n"
- "punpcklbw %%mm7,%%mm1\n"
- "punpckhbw %%mm7,%%mm5\n"
- "punpckhbw %%mm7,%%mm3\n"
- "psubw %%mm1, %%mm4\n"
- "psubw %%mm3, %%mm5\n"
- "psubw %%mm4, %%mm0\n"
- "psubw %%mm5, %%mm2\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm0, %%mm3\n\t"
- "pcmpgtw %%mm2, %%mm1\n\t"
- "pxor %%mm3, %%mm0\n"
- "pxor %%mm1, %%mm2\n"
- "psubw %%mm3, %%mm0\n"
- "psubw %%mm1, %%mm2\n"
- "paddw %%mm0, %%mm2\n"
- "paddw %%mm2, %%mm6\n"
-
- "add %2,%0\n"
- "subl $2, %%ecx\n"
- " jnz 1b\n"
-
- "movq %%mm6, %%mm0\n"
- "punpcklwd %%mm7,%%mm0\n"
- "punpckhwd %%mm7,%%mm6\n"
- "paddd %%mm0, %%mm6\n"
-
- "movq %%mm6,%%mm0\n"
- "psrlq $32, %%mm6\n"
- "paddd %%mm6,%%mm0\n"
- "movd %%mm0,%1\n"
- : "+r" (pix1), "=r"(tmp)
- : "r" ((long)line_size) , "g" (h-2)
- : "%ecx");
- return tmp;
-}
-
-static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
- int tmp;
- uint8_t * pix= pix1;
- asm volatile (
- "movl %3,%%ecx\n"
- "pxor %%mm7,%%mm7\n"
- "pxor %%mm6,%%mm6\n"
-
- "movq (%0),%%mm0\n"
- "movq 1(%0),%%mm1\n"
- "movq %%mm0, %%mm2\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7,%%mm0\n"
- "punpcklbw %%mm7,%%mm1\n"
- "punpckhbw %%mm7,%%mm2\n"
- "punpckhbw %%mm7,%%mm3\n"
- "psubw %%mm1, %%mm0\n"
- "psubw %%mm3, %%mm2\n"
-
- "add %2,%0\n"
-
- "movq (%0),%%mm4\n"
- "movq 1(%0),%%mm1\n"
- "movq %%mm4, %%mm5\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7,%%mm4\n"
- "punpcklbw %%mm7,%%mm1\n"
- "punpckhbw %%mm7,%%mm5\n"
- "punpckhbw %%mm7,%%mm3\n"
- "psubw %%mm1, %%mm4\n"
- "psubw %%mm3, %%mm5\n"
- "psubw %%mm4, %%mm0\n"
- "psubw %%mm5, %%mm2\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm0, %%mm3\n\t"
- "pcmpgtw %%mm2, %%mm1\n\t"
- "pxor %%mm3, %%mm0\n"
- "pxor %%mm1, %%mm2\n"
- "psubw %%mm3, %%mm0\n"
- "psubw %%mm1, %%mm2\n"
- "paddw %%mm0, %%mm2\n"
- "paddw %%mm2, %%mm6\n"
-
- "add %2,%0\n"
- "1:\n"
-
- "movq (%0),%%mm0\n"
- "movq 1(%0),%%mm1\n"
- "movq %%mm0, %%mm2\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7,%%mm0\n"
- "punpcklbw %%mm7,%%mm1\n"
- "punpckhbw %%mm7,%%mm2\n"
- "punpckhbw %%mm7,%%mm3\n"
- "psubw %%mm1, %%mm0\n"
- "psubw %%mm3, %%mm2\n"
- "psubw %%mm0, %%mm4\n"
- "psubw %%mm2, %%mm5\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm4, %%mm3\n\t"
- "pcmpgtw %%mm5, %%mm1\n\t"
- "pxor %%mm3, %%mm4\n"
- "pxor %%mm1, %%mm5\n"
- "psubw %%mm3, %%mm4\n"
- "psubw %%mm1, %%mm5\n"
- "paddw %%mm4, %%mm5\n"
- "paddw %%mm5, %%mm6\n"
-
- "add %2,%0\n"
-
- "movq (%0),%%mm4\n"
- "movq 1(%0),%%mm1\n"
- "movq %%mm4, %%mm5\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7,%%mm4\n"
- "punpcklbw %%mm7,%%mm1\n"
- "punpckhbw %%mm7,%%mm5\n"
- "punpckhbw %%mm7,%%mm3\n"
- "psubw %%mm1, %%mm4\n"
- "psubw %%mm3, %%mm5\n"
- "psubw %%mm4, %%mm0\n"
- "psubw %%mm5, %%mm2\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm0, %%mm3\n\t"
- "pcmpgtw %%mm2, %%mm1\n\t"
- "pxor %%mm3, %%mm0\n"
- "pxor %%mm1, %%mm2\n"
- "psubw %%mm3, %%mm0\n"
- "psubw %%mm1, %%mm2\n"
- "paddw %%mm0, %%mm2\n"
- "paddw %%mm2, %%mm6\n"
-
- "add %2,%0\n"
- "subl $2, %%ecx\n"
- " jnz 1b\n"
-
- "movq %%mm6, %%mm0\n"
- "punpcklwd %%mm7,%%mm0\n"
- "punpckhwd %%mm7,%%mm6\n"
- "paddd %%mm0, %%mm6\n"
-
- "movq %%mm6,%%mm0\n"
- "psrlq $32, %%mm6\n"
- "paddd %%mm6,%%mm0\n"
- "movd %%mm0,%1\n"
- : "+r" (pix1), "=r"(tmp)
- : "r" ((long)line_size) , "g" (h-2)
- : "%ecx");
- return tmp + hf_noise8_mmx(pix+8, line_size, h);
-}
-
-static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- MpegEncContext *c = p;
- int score1, score2;
-
- if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
- else score1 = sse16_mmx(c, pix1, pix2, line_size, h);
- score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
-
- if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
- else return score1 + FFABS(score2)*8;
-}
-
-static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- MpegEncContext *c = p;
- int score1= sse8_mmx(c, pix1, pix2, line_size, h);
- int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
-
- if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
- else return score1 + FFABS(score2)*8;
-}
-
-static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
- int tmp;
-
- assert( (((int)pix) & 7) == 0);
- assert((line_size &7) ==0);
-
-#define SUM(in0, in1, out0, out1) \
- "movq (%0), %%mm2\n"\
- "movq 8(%0), %%mm3\n"\
- "add %2,%0\n"\
- "movq %%mm2, " #out0 "\n"\
- "movq %%mm3, " #out1 "\n"\
- "psubusb " #in0 ", %%mm2\n"\
- "psubusb " #in1 ", %%mm3\n"\
- "psubusb " #out0 ", " #in0 "\n"\
- "psubusb " #out1 ", " #in1 "\n"\
- "por %%mm2, " #in0 "\n"\
- "por %%mm3, " #in1 "\n"\
- "movq " #in0 ", %%mm2\n"\
- "movq " #in1 ", %%mm3\n"\
- "punpcklbw %%mm7, " #in0 "\n"\
- "punpcklbw %%mm7, " #in1 "\n"\
- "punpckhbw %%mm7, %%mm2\n"\
- "punpckhbw %%mm7, %%mm3\n"\
- "paddw " #in1 ", " #in0 "\n"\
- "paddw %%mm3, %%mm2\n"\
- "paddw %%mm2, " #in0 "\n"\
- "paddw " #in0 ", %%mm6\n"
-
-
- asm volatile (
- "movl %3,%%ecx\n"
- "pxor %%mm6,%%mm6\n"
- "pxor %%mm7,%%mm7\n"
- "movq (%0),%%mm0\n"
- "movq 8(%0),%%mm1\n"
- "add %2,%0\n"
- "subl $2, %%ecx\n"
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "1:\n"
-
- SUM(%%mm4, %%mm5, %%mm0, %%mm1)
-
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
-
- "subl $2, %%ecx\n"
- "jnz 1b\n"
-
- "movq %%mm6,%%mm0\n"
- "psrlq $32, %%mm6\n"
- "paddw %%mm6,%%mm0\n"
- "movq %%mm0,%%mm6\n"
- "psrlq $16, %%mm0\n"
- "paddw %%mm6,%%mm0\n"
- "movd %%mm0,%1\n"
- : "+r" (pix), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp & 0xFFFF;
-}
-#undef SUM
-
-static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
- int tmp;
-
- assert( (((int)pix) & 7) == 0);
- assert((line_size &7) ==0);
-
-#define SUM(in0, in1, out0, out1) \
- "movq (%0), " #out0 "\n"\
- "movq 8(%0), " #out1 "\n"\
- "add %2,%0\n"\
- "psadbw " #out0 ", " #in0 "\n"\
- "psadbw " #out1 ", " #in1 "\n"\
- "paddw " #in1 ", " #in0 "\n"\
- "paddw " #in0 ", %%mm6\n"
-
- asm volatile (
- "movl %3,%%ecx\n"
- "pxor %%mm6,%%mm6\n"
- "pxor %%mm7,%%mm7\n"
- "movq (%0),%%mm0\n"
- "movq 8(%0),%%mm1\n"
- "add %2,%0\n"
- "subl $2, %%ecx\n"
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "1:\n"
-
- SUM(%%mm4, %%mm5, %%mm0, %%mm1)
-
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
-
- "subl $2, %%ecx\n"
- "jnz 1b\n"
-
- "movd %%mm6,%1\n"
- : "+r" (pix), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp;
-}
-#undef SUM
-
-static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- int tmp;
-
- assert( (((int)pix1) & 7) == 0);
- assert( (((int)pix2) & 7) == 0);
- assert((line_size &7) ==0);
-
-#define SUM(in0, in1, out0, out1) \
- "movq (%0),%%mm2\n"\
- "movq (%1)," #out0 "\n"\
- "movq 8(%0),%%mm3\n"\
- "movq 8(%1)," #out1 "\n"\
- "add %3,%0\n"\
- "add %3,%1\n"\
- "psubb " #out0 ", %%mm2\n"\
- "psubb " #out1 ", %%mm3\n"\
- "pxor %%mm7, %%mm2\n"\
- "pxor %%mm7, %%mm3\n"\
- "movq %%mm2, " #out0 "\n"\
- "movq %%mm3, " #out1 "\n"\
- "psubusb " #in0 ", %%mm2\n"\
- "psubusb " #in1 ", %%mm3\n"\
- "psubusb " #out0 ", " #in0 "\n"\
- "psubusb " #out1 ", " #in1 "\n"\
- "por %%mm2, " #in0 "\n"\
- "por %%mm3, " #in1 "\n"\
- "movq " #in0 ", %%mm2\n"\
- "movq " #in1 ", %%mm3\n"\
- "punpcklbw %%mm7, " #in0 "\n"\
- "punpcklbw %%mm7, " #in1 "\n"\
- "punpckhbw %%mm7, %%mm2\n"\
- "punpckhbw %%mm7, %%mm3\n"\
- "paddw " #in1 ", " #in0 "\n"\
- "paddw %%mm3, %%mm2\n"\
- "paddw %%mm2, " #in0 "\n"\
- "paddw " #in0 ", %%mm6\n"
-
-
- asm volatile (
- "movl %4,%%ecx\n"
- "pxor %%mm6,%%mm6\n"
- "pcmpeqw %%mm7,%%mm7\n"
- "psllw $15, %%mm7\n"
- "packsswb %%mm7, %%mm7\n"
- "movq (%0),%%mm0\n"
- "movq (%1),%%mm2\n"
- "movq 8(%0),%%mm1\n"
- "movq 8(%1),%%mm3\n"
- "add %3,%0\n"
- "add %3,%1\n"
- "subl $2, %%ecx\n"
- "psubb %%mm2, %%mm0\n"
- "psubb %%mm3, %%mm1\n"
- "pxor %%mm7, %%mm0\n"
- "pxor %%mm7, %%mm1\n"
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "1:\n"
-
- SUM(%%mm4, %%mm5, %%mm0, %%mm1)
-
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
-
- "subl $2, %%ecx\n"
- "jnz 1b\n"
-
- "movq %%mm6,%%mm0\n"
- "psrlq $32, %%mm6\n"
- "paddw %%mm6,%%mm0\n"
- "movq %%mm0,%%mm6\n"
- "psrlq $16, %%mm0\n"
- "paddw %%mm6,%%mm0\n"
- "movd %%mm0,%2\n"
- : "+r" (pix1), "+r" (pix2), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp & 0x7FFF;
-}
-#undef SUM
-
-static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- int tmp;
-
- assert( (((int)pix1) & 7) == 0);
- assert( (((int)pix2) & 7) == 0);
- assert((line_size &7) ==0);
-
-#define SUM(in0, in1, out0, out1) \
- "movq (%0)," #out0 "\n"\
- "movq (%1),%%mm2\n"\
- "movq 8(%0)," #out1 "\n"\
- "movq 8(%1),%%mm3\n"\
- "add %3,%0\n"\
- "add %3,%1\n"\
- "psubb %%mm2, " #out0 "\n"\
- "psubb %%mm3, " #out1 "\n"\
- "pxor %%mm7, " #out0 "\n"\
- "pxor %%mm7, " #out1 "\n"\
- "psadbw " #out0 ", " #in0 "\n"\
- "psadbw " #out1 ", " #in1 "\n"\
- "paddw " #in1 ", " #in0 "\n"\
- "paddw " #in0 ", %%mm6\n"
-
- asm volatile (
- "movl %4,%%ecx\n"
- "pxor %%mm6,%%mm6\n"
- "pcmpeqw %%mm7,%%mm7\n"
- "psllw $15, %%mm7\n"
- "packsswb %%mm7, %%mm7\n"
- "movq (%0),%%mm0\n"
- "movq (%1),%%mm2\n"
- "movq 8(%0),%%mm1\n"
- "movq 8(%1),%%mm3\n"
- "add %3,%0\n"
- "add %3,%1\n"
- "subl $2, %%ecx\n"
- "psubb %%mm2, %%mm0\n"
- "psubb %%mm3, %%mm1\n"
- "pxor %%mm7, %%mm0\n"
- "pxor %%mm7, %%mm1\n"
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "1:\n"
-
- SUM(%%mm4, %%mm5, %%mm0, %%mm1)
-
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
-
- "subl $2, %%ecx\n"
- "jnz 1b\n"
-
- "movd %%mm6,%2\n"
- : "+r" (pix1), "+r" (pix2), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp;
-}
-#undef SUM
-
-static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
- long i=0;
- asm volatile(
- "1: \n\t"
- "movq (%2, %0), %%mm0 \n\t"
- "movq (%1, %0), %%mm1 \n\t"
- "psubb %%mm0, %%mm1 \n\t"
- "movq %%mm1, (%3, %0) \n\t"
- "movq 8(%2, %0), %%mm0 \n\t"
- "movq 8(%1, %0), %%mm1 \n\t"
- "psubb %%mm0, %%mm1 \n\t"
- "movq %%mm1, 8(%3, %0) \n\t"
- "add $16, %0 \n\t"
- "cmp %4, %0 \n\t"
- " jb 1b \n\t"
- : "+r" (i)
- : "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
- );
- for(; i<w; i++)
- dst[i+0] = src1[i+0]-src2[i+0];
-}
-
-static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
- long i=0;
- uint8_t l, lt;
-
- asm volatile(
- "1: \n\t"
- "movq -1(%1, %0), %%mm0 \n\t" // LT
- "movq (%1, %0), %%mm1 \n\t" // T
- "movq -1(%2, %0), %%mm2 \n\t" // L
- "movq (%2, %0), %%mm3 \n\t" // X
- "movq %%mm2, %%mm4 \n\t" // L
- "psubb %%mm0, %%mm2 \n\t"
- "paddb %%mm1, %%mm2 \n\t" // L + T - LT
- "movq %%mm4, %%mm5 \n\t" // L
- "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
- "pminub %%mm5, %%mm1 \n\t" // min(T, L)
- "pminub %%mm2, %%mm4 \n\t"
- "pmaxub %%mm1, %%mm4 \n\t"
- "psubb %%mm4, %%mm3 \n\t" // dst - pred
- "movq %%mm3, (%3, %0) \n\t"
- "add $8, %0 \n\t"
- "cmp %4, %0 \n\t"
- " jb 1b \n\t"
- : "+r" (i)
- : "r"(src1), "r"(src2), "r"(dst), "r"((long)w)
- );
-
- l= *left;
- lt= *left_top;
-
- dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);
-
- *left_top= src1[w-1];
- *left = src2[w-1];
-}
-
#define PAETH(cpu, abs3)\
void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
@@ -1659,367 +770,6 @@ PAETH(mmx2, ABS3_MMX2)
PAETH(ssse3, ABS3_SSSE3)
#endif
-#define DIFF_PIXELS_1(m,a,t,p1,p2)\
- "mov"#m" "#p1", "#a" \n\t"\
- "mov"#m" "#p2", "#t" \n\t"\
- "punpcklbw "#a", "#t" \n\t"\
- "punpcklbw "#a", "#a" \n\t"\
- "psubw "#t", "#a" \n\t"\
-
-#define DIFF_PIXELS_8(m0,m1,mm,p1,p2,stride,temp) {\
- uint8_t *p1b=p1, *p2b=p2;\
- asm volatile(\
- DIFF_PIXELS_1(m0, mm##0, mm##7, (%1), (%2))\
- DIFF_PIXELS_1(m0, mm##1, mm##7, (%1,%3), (%2,%3))\
- DIFF_PIXELS_1(m0, mm##2, mm##7, (%1,%3,2), (%2,%3,2))\
- "add %4, %1 \n\t"\
- "add %4, %2 \n\t"\
- DIFF_PIXELS_1(m0, mm##3, mm##7, (%1), (%2))\
- DIFF_PIXELS_1(m0, mm##4, mm##7, (%1,%3), (%2,%3))\
- DIFF_PIXELS_1(m0, mm##5, mm##7, (%1,%3,2), (%2,%3,2))\
- DIFF_PIXELS_1(m0, mm##6, mm##7, (%1,%4), (%2,%4))\
- "mov"#m1" "#mm"0, %0 \n\t"\
- DIFF_PIXELS_1(m0, mm##7, mm##0, (%1,%3,4), (%2,%3,4))\
- "mov"#m1" %0, "#mm"0 \n\t"\
- : "+m"(temp), "+r"(p1b), "+r"(p2b)\
- : "r"((long)stride), "r"((long)stride*3)\
- );\
-}
- //the "+m"(temp) is needed as gcc 2.95 sometimes fails to compile "=m"(temp)
-
-#define DIFF_PIXELS_4x8(p1,p2,stride,temp) DIFF_PIXELS_8(d, q, %%mm, p1, p2, stride, temp)
-#define DIFF_PIXELS_8x8(p1,p2,stride,temp) DIFF_PIXELS_8(q, dqa, %%xmm, p1, p2, stride, temp)
-
-#define LBUTTERFLY2(a1,b1,a2,b2)\
- "paddw " #b1 ", " #a1 " \n\t"\
- "paddw " #b2 ", " #a2 " \n\t"\
- "paddw " #b1 ", " #b1 " \n\t"\
- "paddw " #b2 ", " #b2 " \n\t"\
- "psubw " #a1 ", " #b1 " \n\t"\
- "psubw " #a2 ", " #b2 " \n\t"
-
-#define HADAMARD8(m0, m1, m2, m3, m4, m5, m6, m7)\
- LBUTTERFLY2(m0, m1, m2, m3)\
- LBUTTERFLY2(m4, m5, m6, m7)\
- LBUTTERFLY2(m0, m2, m1, m3)\
- LBUTTERFLY2(m4, m6, m5, m7)\
- LBUTTERFLY2(m0, m4, m1, m5)\
- LBUTTERFLY2(m2, m6, m3, m7)\
-
-#define HADAMARD48 HADAMARD8(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm6, %%mm7)
-
-#define MMABS_MMX(a,z)\
- "pxor " #z ", " #z " \n\t"\
- "pcmpgtw " #a ", " #z " \n\t"\
- "pxor " #z ", " #a " \n\t"\
- "psubw " #z ", " #a " \n\t"
-
-#define MMABS_MMX2(a,z)\
- "pxor " #z ", " #z " \n\t"\
- "psubw " #a ", " #z " \n\t"\
- "pmaxsw " #z ", " #a " \n\t"
-
-#define MMABS_SSSE3(a,z)\
- "pabsw " #a ", " #a " \n\t"
-
-#define MMABS_SUM(a,z, sum)\
- MMABS(a,z)\
- "paddusw " #a ", " #sum " \n\t"
-
-#define MMABS_SUM_8x8_NOSPILL\
- MMABS(%%xmm0, %%xmm8)\
- MMABS(%%xmm1, %%xmm9)\
- MMABS_SUM(%%xmm2, %%xmm8, %%xmm0)\
- MMABS_SUM(%%xmm3, %%xmm9, %%xmm1)\
- MMABS_SUM(%%xmm4, %%xmm8, %%xmm0)\
- MMABS_SUM(%%xmm5, %%xmm9, %%xmm1)\
- MMABS_SUM(%%xmm6, %%xmm8, %%xmm0)\
- MMABS_SUM(%%xmm7, %%xmm9, %%xmm1)\
- "paddusw %%xmm1, %%xmm0 \n\t"
-
-#ifdef ARCH_X86_64
-#define MMABS_SUM_8x8_SSE2 MMABS_SUM_8x8_NOSPILL
-#else
-#define MMABS_SUM_8x8_SSE2\
- "movdqa %%xmm7, (%1) \n\t"\
- MMABS(%%xmm0, %%xmm7)\
- MMABS(%%xmm1, %%xmm7)\
- MMABS_SUM(%%xmm2, %%xmm7, %%xmm0)\
- MMABS_SUM(%%xmm3, %%xmm7, %%xmm1)\
- MMABS_SUM(%%xmm4, %%xmm7, %%xmm0)\
- MMABS_SUM(%%xmm5, %%xmm7, %%xmm1)\
- MMABS_SUM(%%xmm6, %%xmm7, %%xmm0)\
- "movdqa (%1), %%xmm2 \n\t"\
- MMABS_SUM(%%xmm2, %%xmm7, %%xmm1)\
- "paddusw %%xmm1, %%xmm0 \n\t"
-#endif
-
-#define LOAD4(o, a, b, c, d)\
- "movq "#o"(%1), "#a" \n\t"\
- "movq "#o"+8(%1), "#b" \n\t"\
- "movq "#o"+16(%1), "#c" \n\t"\
- "movq "#o"+24(%1), "#d" \n\t"\
-
-#define STORE4(o, a, b, c, d)\
- "movq "#a", "#o"(%1) \n\t"\
- "movq "#b", "#o"+8(%1) \n\t"\
- "movq "#c", "#o"+16(%1) \n\t"\
- "movq "#d", "#o"+24(%1) \n\t"\
-
-/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to
- * about 100k on extreme inputs. But that's very unlikely to occur in natural video,
- * and it's even more unlikely to not have any alternative mvs/modes with lower cost. */
-#define HSUM_MMX(a, t, dst)\
- "movq "#a", "#t" \n\t"\
- "psrlq $32, "#a" \n\t"\
- "paddusw "#t", "#a" \n\t"\
- "movq "#a", "#t" \n\t"\
- "psrlq $16, "#a" \n\t"\
- "paddusw "#t", "#a" \n\t"\
- "movd "#a", "#dst" \n\t"\
-
-#define HSUM_MMX2(a, t, dst)\
- "pshufw $0x0E, "#a", "#t" \n\t"\
- "paddusw "#t", "#a" \n\t"\
- "pshufw $0x01, "#a", "#t" \n\t"\
- "paddusw "#t", "#a" \n\t"\
- "movd "#a", "#dst" \n\t"\
-
-#define HSUM_SSE2(a, t, dst)\
- "movhlps "#a", "#t" \n\t"\
- "paddusw "#t", "#a" \n\t"\
- "pshuflw $0x0E, "#a", "#t" \n\t"\
- "paddusw "#t", "#a" \n\t"\
- "pshuflw $0x01, "#a", "#t" \n\t"\
- "paddusw "#t", "#a" \n\t"\
- "movd "#a", "#dst" \n\t"\
-
-#define HADAMARD8_DIFF_MMX(cpu) \
-static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
- DECLARE_ALIGNED_8(uint64_t, temp[13]);\
- int sum;\
-\
- assert(h==8);\
-\
- DIFF_PIXELS_4x8(src1, src2, stride, temp[0]);\
-\
- asm volatile(\
- HADAMARD48\
-\
- "movq %%mm7, 96(%1) \n\t"\
-\
- TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
- STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)\
-\
- "movq 96(%1), %%mm7 \n\t"\
- TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
- STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)\
-\
- : "=r" (sum)\
- : "r"(temp)\
- );\
-\
- DIFF_PIXELS_4x8(src1+4, src2+4, stride, temp[4]);\
-\
- asm volatile(\
- HADAMARD48\
-\
- "movq %%mm7, 96(%1) \n\t"\
-\
- TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
- STORE4(32, %%mm0, %%mm3, %%mm7, %%mm2)\
-\
- "movq 96(%1), %%mm7 \n\t"\
- TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
- "movq %%mm7, %%mm5 \n\t"/*FIXME remove*/\
- "movq %%mm6, %%mm7 \n\t"\
- "movq %%mm0, %%mm6 \n\t"\
-\
- LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)\
-\
- HADAMARD48\
- "movq %%mm7, 64(%1) \n\t"\
- MMABS(%%mm0, %%mm7)\
- MMABS(%%mm1, %%mm7)\
- MMABS_SUM(%%mm2, %%mm7, %%mm0)\
- MMABS_SUM(%%mm3, %%mm7, %%mm1)\
- MMABS_SUM(%%mm4, %%mm7, %%mm0)\
- MMABS_SUM(%%mm5, %%mm7, %%mm1)\
- MMABS_SUM(%%mm6, %%mm7, %%mm0)\
- "movq 64(%1), %%mm2 \n\t"\
- MMABS_SUM(%%mm2, %%mm7, %%mm1)\
- "paddusw %%mm1, %%mm0 \n\t"\
- "movq %%mm0, 64(%1) \n\t"\
-\
- LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)\
- LOAD4(32, %%mm4, %%mm5, %%mm6, %%mm7)\
-\
- HADAMARD48\
- "movq %%mm7, (%1) \n\t"\
- MMABS(%%mm0, %%mm7)\
- MMABS(%%mm1, %%mm7)\
- MMABS_SUM(%%mm2, %%mm7, %%mm0)\
- MMABS_SUM(%%mm3, %%mm7, %%mm1)\
- MMABS_SUM(%%mm4, %%mm7, %%mm0)\
- MMABS_SUM(%%mm5, %%mm7, %%mm1)\
- MMABS_SUM(%%mm6, %%mm7, %%mm0)\
- "movq (%1), %%mm2 \n\t"\
- MMABS_SUM(%%mm2, %%mm7, %%mm1)\
- "paddusw 64(%1), %%mm0 \n\t"\
- "paddusw %%mm1, %%mm0 \n\t"\
-\
- HSUM(%%mm0, %%mm1, %0)\
-\
- : "=r" (sum)\
- : "r"(temp)\
- );\
- return sum&0xFFFF;\
-}\
-WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)
-
-#define HADAMARD8_DIFF_SSE2(cpu) \
-static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
- DECLARE_ALIGNED_16(uint64_t, temp[4]);\
- int sum;\
-\
- assert(h==8);\
-\
- DIFF_PIXELS_8x8(src1, src2, stride, temp[0]);\
-\
- asm volatile(\
- HADAMARD8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)\
- TRANSPOSE8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7, (%1))\
- HADAMARD8(%%xmm0, %%xmm5, %%xmm7, %%xmm3, %%xmm6, %%xmm4, %%xmm2, %%xmm1)\
- MMABS_SUM_8x8\
- HSUM_SSE2(%%xmm0, %%xmm1, %0)\
- : "=r" (sum)\
- : "r"(temp)\
- );\
- return sum&0xFFFF;\
-}\
-WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)
-
-#define MMABS(a,z) MMABS_MMX(a,z)
-#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
-HADAMARD8_DIFF_MMX(mmx)
-#undef MMABS
-#undef HSUM
-
-#define MMABS(a,z) MMABS_MMX2(a,z)
-#define MMABS_SUM_8x8 MMABS_SUM_8x8_SSE2
-#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
-HADAMARD8_DIFF_MMX(mmx2)
-HADAMARD8_DIFF_SSE2(sse2)
-#undef MMABS
-#undef MMABS_SUM_8x8
-#undef HSUM
-
-#ifdef HAVE_SSSE3
-#define MMABS(a,z) MMABS_SSSE3(a,z)
-#define MMABS_SUM_8x8 MMABS_SUM_8x8_NOSPILL
-HADAMARD8_DIFF_SSE2(ssse3)
-#undef MMABS
-#undef MMABS_SUM_8x8
-#endif
-
-#define DCT_SAD4(m,mm,o)\
- "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
- "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
- "mov"#m" "#o"+32(%1), "#mm"4 \n\t"\
- "mov"#m" "#o"+48(%1), "#mm"5 \n\t"\
- MMABS_SUM(mm##2, mm##6, mm##0)\
- MMABS_SUM(mm##3, mm##7, mm##1)\
- MMABS_SUM(mm##4, mm##6, mm##0)\
- MMABS_SUM(mm##5, mm##7, mm##1)\
-
-#define DCT_SAD_MMX\
- "pxor %%mm0, %%mm0 \n\t"\
- "pxor %%mm1, %%mm1 \n\t"\
- DCT_SAD4(q, %%mm, 0)\
- DCT_SAD4(q, %%mm, 8)\
- DCT_SAD4(q, %%mm, 64)\
- DCT_SAD4(q, %%mm, 72)\
- "paddusw %%mm1, %%mm0 \n\t"\
- HSUM(%%mm0, %%mm1, %0)
-
-#define DCT_SAD_SSE2\
- "pxor %%xmm0, %%xmm0 \n\t"\
- "pxor %%xmm1, %%xmm1 \n\t"\
- DCT_SAD4(dqa, %%xmm, 0)\
- DCT_SAD4(dqa, %%xmm, 64)\
- "paddusw %%xmm1, %%xmm0 \n\t"\
- HSUM(%%xmm0, %%xmm1, %0)
-
-#define DCT_SAD_FUNC(cpu) \
-static int sum_abs_dctelem_##cpu(DCTELEM *block){\
- int sum;\
- asm volatile(\
- DCT_SAD\
- :"=r"(sum)\
- :"r"(block)\
- );\
- return sum&0xFFFF;\
-}
-
-#define DCT_SAD DCT_SAD_MMX
-#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
-#define MMABS(a,z) MMABS_MMX(a,z)
-DCT_SAD_FUNC(mmx)
-#undef MMABS
-#undef HSUM
-
-#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
-#define MMABS(a,z) MMABS_MMX2(a,z)
-DCT_SAD_FUNC(mmx2)
-#undef HSUM
-#undef DCT_SAD
-
-#define DCT_SAD DCT_SAD_SSE2
-#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)
-DCT_SAD_FUNC(sse2)
-#undef MMABS
-
-#ifdef HAVE_SSSE3
-#define MMABS(a,z) MMABS_SSSE3(a,z)
-DCT_SAD_FUNC(ssse3)
-#undef MMABS
-#endif
-#undef HSUM
-#undef DCT_SAD
-
-static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
- int sum;
- long i=size;
- asm volatile(
- "pxor %%mm4, %%mm4 \n"
- "1: \n"
- "sub $8, %0 \n"
- "movq (%2,%0), %%mm2 \n"
- "movq (%3,%0,2), %%mm0 \n"
- "movq 8(%3,%0,2), %%mm1 \n"
- "punpckhbw %%mm2, %%mm3 \n"
- "punpcklbw %%mm2, %%mm2 \n"
- "psraw $8, %%mm3 \n"
- "psraw $8, %%mm2 \n"
- "psubw %%mm3, %%mm1 \n"
- "psubw %%mm2, %%mm0 \n"
- "pmaddwd %%mm1, %%mm1 \n"
- "pmaddwd %%mm0, %%mm0 \n"
- "paddd %%mm1, %%mm4 \n"
- "paddd %%mm0, %%mm4 \n"
- "jg 1b \n"
- "movq %%mm4, %%mm3 \n"
- "psrlq $32, %%mm3 \n"
- "paddd %%mm3, %%mm4 \n"
- "movd %%mm4, %1 \n"
- :"+r"(i), "=r"(sum)
- :"r"(pix1), "r"(pix2)
- );
- return sum;
-}
-
-#endif //CONFIG_ENCODERS
-
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
"paddw " #m4 ", " #m3 " \n\t" /* x1 */\
"movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
@@ -2858,72 +1608,6 @@ static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int o
}
}
-#ifdef CONFIG_ENCODERS
-
-#define PHADDD(a, t)\
- "movq "#a", "#t" \n\t"\
- "psrlq $32, "#a" \n\t"\
- "paddd "#t", "#a" \n\t"
-/*
- pmulhw: dst[0-15]=(src[0-15]*dst[0-15])[16-31]
- pmulhrw: dst[0-15]=(src[0-15]*dst[0-15] + 0x8000)[16-31]
- pmulhrsw: dst[0-15]=(src[0-15]*dst[0-15] + 0x4000)[15-30]
- */
-#define PMULHRW(x, y, s, o)\
- "pmulhw " #s ", "#x " \n\t"\
- "pmulhw " #s ", "#y " \n\t"\
- "paddw " #o ", "#x " \n\t"\
- "paddw " #o ", "#y " \n\t"\
- "psraw $1, "#x " \n\t"\
- "psraw $1, "#y " \n\t"
-#define DEF(x) x ## _mmx
-#define SET_RND MOVQ_WONE
-#define SCALE_OFFSET 1
-
-#include "dsputil_mmx_qns.h"
-
-#undef DEF
-#undef SET_RND
-#undef SCALE_OFFSET
-#undef PMULHRW
-
-#define DEF(x) x ## _3dnow
-#define SET_RND(x)
-#define SCALE_OFFSET 0
-#define PMULHRW(x, y, s, o)\
- "pmulhrw " #s ", "#x " \n\t"\
- "pmulhrw " #s ", "#y " \n\t"
-
-#include "dsputil_mmx_qns.h"
-
-#undef DEF
-#undef SET_RND
-#undef SCALE_OFFSET
-#undef PMULHRW
-
-#ifdef HAVE_SSSE3
-#undef PHADDD
-#define DEF(x) x ## _ssse3
-#define SET_RND(x)
-#define SCALE_OFFSET -1
-#define PHADDD(a, t)\
- "pshufw $0x0E, "#a", "#t" \n\t"\
- "paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */
-#define PMULHRW(x, y, s, o)\
- "pmulhrsw " #s ", "#x " \n\t"\
- "pmulhrsw " #s ", "#y " \n\t"
-
-#include "dsputil_mmx_qns.h"
-
-#undef DEF
-#undef SET_RND
-#undef SCALE_OFFSET
-#undef PMULHRW
-#undef PHADDD
-#endif //HAVE_SSSE3
-
-#endif /* CONFIG_ENCODERS */
-
#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
const uint8_t *p= mem;\
@@ -2954,10 +1638,6 @@ void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
avg_pixels16_mmx(dst, src, stride, 16);
}
-/* FLAC specific */
-void ff_flac_compute_autocorr_sse2(const int32_t *data, int len, int lag,
- double *autoc);
-
/* VC1 specific */
void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);
@@ -3320,18 +2000,6 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
if (mm_flags & MM_MMX) {
const int idct_algo= avctx->idct_algo;
-#ifdef CONFIG_ENCODERS
- const int dct_algo = avctx->dct_algo;
- if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
- if(mm_flags & MM_SSE2){
- c->fdct = ff_fdct_sse2;
- }else if(mm_flags & MM_MMXEXT){
- c->fdct = ff_fdct_mmx2;
- }else{
- c->fdct = ff_fdct_mmx;
- }
- }
-#endif //CONFIG_ENCODERS
if(avctx->lowres==0){
if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
c->idct_put= ff_simple_idct_put_mmx;
@@ -3382,17 +2050,10 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
}
-#ifdef CONFIG_ENCODERS
- c->get_pixels = get_pixels_mmx;
- c->diff_pixels = diff_pixels_mmx;
-#endif //CONFIG_ENCODERS
c->put_pixels_clamped = put_pixels_clamped_mmx;
c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
c->add_pixels_clamped = add_pixels_clamped_mmx;
c->clear_blocks = clear_blocks_mmx;
-#ifdef CONFIG_ENCODERS
- c->pix_sum = pix_sum16_mmx;
-#endif //CONFIG_ENCODERS
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
@@ -3413,32 +2074,6 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->add_bytes= add_bytes_mmx;
c->add_bytes_l2= add_bytes_l2_mmx;
-#ifdef CONFIG_ENCODERS
- c->diff_bytes= diff_bytes_mmx;
- c->sum_abs_dctelem= sum_abs_dctelem_mmx;
-
- c->hadamard8_diff[0]= hadamard8_diff16_mmx;
- c->hadamard8_diff[1]= hadamard8_diff_mmx;
-
- c->pix_norm1 = pix_norm1_mmx;
- c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx;
- c->sse[1] = sse8_mmx;
- c->vsad[4]= vsad_intra16_mmx;
-
- c->nsse[0] = nsse16_mmx;
- c->nsse[1] = nsse8_mmx;
- if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
- c->vsad[0] = vsad16_mmx;
- }
-
- if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
- c->try_8x8basis= try_8x8basis_mmx;
- }
- c->add_8x8basis= add_8x8basis_mmx;
-
- c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;
-
-#endif //CONFIG_ENCODERS
if (ENABLE_ANY_H263) {
c->h263_v_loop_filter= h263_v_loop_filter_mmx;
@@ -3472,13 +2107,6 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
-#ifdef CONFIG_ENCODERS
- c->sum_abs_dctelem= sum_abs_dctelem_mmx2;
- c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
- c->hadamard8_diff[1]= hadamard8_diff_mmx2;
- c->vsad[4]= vsad_intra16_mmx2;
-#endif //CONFIG_ENCODERS
-
c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
@@ -3489,9 +2117,6 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
-#ifdef CONFIG_ENCODERS
- c->vsad[0] = vsad16_mmx2;
-#endif //CONFIG_ENCODERS
}
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
@@ -3568,9 +2193,6 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
ff_vc1dsp_init_mmx(c, avctx);
c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
-#ifdef CONFIG_ENCODERS
- c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
-#endif //CONFIG_ENCODERS
} else if (mm_flags & MM_3DNOW) {
c->prefetch = prefetch_3dnow;
@@ -3666,28 +2288,6 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
#endif
-#ifdef CONFIG_ENCODERS
- if(mm_flags & MM_SSE2){
- c->sum_abs_dctelem= sum_abs_dctelem_sse2;
- c->hadamard8_diff[0]= hadamard8_diff16_sse2;
- c->hadamard8_diff[1]= hadamard8_diff_sse2;
- if (ENABLE_FLAC_ENCODER)
- c->flac_compute_autocorr = ff_flac_compute_autocorr_sse2;
- }
-
-#ifdef HAVE_SSSE3
- if(mm_flags & MM_SSSE3){
- if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
- c->try_8x8basis= try_8x8basis_ssse3;
- }
- c->add_8x8basis= add_8x8basis_ssse3;
- c->sum_abs_dctelem= sum_abs_dctelem_ssse3;
- c->hadamard8_diff[0]= hadamard8_diff16_ssse3;
- c->hadamard8_diff[1]= hadamard8_diff_ssse3;
- }
-#endif
-#endif
-
#ifdef CONFIG_SNOW_DECODER
if(mm_flags & MM_SSE2 & 0){
c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
@@ -3708,12 +2308,6 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
#endif
if(mm_flags & MM_3DNOW){
-#ifdef CONFIG_ENCODERS
- if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
- c->try_8x8basis= try_8x8basis_3dnow;
- }
- c->add_8x8basis= add_8x8basis_3dnow;
-#endif //CONFIG_ENCODERS
c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
c->vector_fmul = vector_fmul_3dnow;
if(!(avctx->flags & CODEC_FLAG_BITEXACT))
@@ -3732,9 +2326,9 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
}
-#ifdef CONFIG_ENCODERS
- dsputil_init_pix_mmx(c, avctx);
-#endif //CONFIG_ENCODERS
+ if (ENABLE_ENCODERS)
+ dsputilenc_init_mmx(c, avctx);
+
#if 0
// for speed testing
get_pixels = just_return;
diff --git a/libavcodec/i386/dsputil_mmx.h b/libavcodec/i386/dsputil_mmx.h
index 7747487b3b..6f0d5ef19a 100644
--- a/libavcodec/i386/dsputil_mmx.h
+++ b/libavcodec/i386/dsputil_mmx.h
@@ -23,6 +23,7 @@
#define FFMPEG_DSPUTIL_MMX_H
#include <stdint.h>
+#include "dsputil.h"
typedef struct { uint64_t a, b; } xmm_t;
@@ -43,6 +44,7 @@ extern const uint64_t ff_pw_42;
extern const uint64_t ff_pw_64;
extern const uint64_t ff_pw_96;
extern const uint64_t ff_pw_128;
+extern const uint64_t ff_pw_255;
extern const uint64_t ff_pb_1;
extern const uint64_t ff_pb_3;
@@ -111,4 +113,11 @@ extern const double ff_pd_2[2];
"movdqa 16"#t", "#g" \n\t"
#endif
+#define MOVQ_WONE(regd) \
+ asm volatile ( \
+ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
+ "psrlw $15, %%" #regd ::)
+
+void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx);
+
#endif /* FFMPEG_DSPUTIL_MMX_H */
diff --git a/libavcodec/i386/dsputilenc_mmx.c b/libavcodec/i386/dsputilenc_mmx.c
new file mode 100644
index 0000000000..c9fb670458
--- /dev/null
+++ b/libavcodec/i386/dsputilenc_mmx.c
@@ -0,0 +1,1422 @@
+/*
+ * MMX optimized DSP utils
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
+ */
+
+#include "dsputil.h"
+#include "dsputil_mmx.h"
+#include "mpegvideo.h"
+#include "x86_cpu.h"
+
+
+static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
+{
+ asm volatile(
+ "mov $-128, %%"REG_a" \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "movq (%0, %2), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "movq %%mm0, (%1, %%"REG_a") \n\t"
+ "movq %%mm1, 8(%1, %%"REG_a") \n\t"
+ "movq %%mm2, 16(%1, %%"REG_a") \n\t"
+ "movq %%mm3, 24(%1, %%"REG_a") \n\t"
+ "add %3, %0 \n\t"
+ "add $32, %%"REG_a" \n\t"
+ "js 1b \n\t"
+ : "+r" (pixels)
+ : "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
+ : "%"REG_a
+ );
+}
+
+static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
+{
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "mov $-128, %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "movq (%1), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "movq %%mm1, 8(%2, %%"REG_a") \n\t"
+ "add %3, %0 \n\t"
+ "add %3, %1 \n\t"
+ "add $16, %%"REG_a" \n\t"
+ "jnz 1b \n\t"
+ : "+r" (s1), "+r" (s2)
+ : "r" (block+64), "r" ((long)stride)
+ : "%"REG_a
+ );
+}
+
+static int pix_sum16_mmx(uint8_t * pix, int line_size){
+ const int h=16;
+ int sum;
+ long index= -line_size*h;
+
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "pxor %%mm6, %%mm6 \n\t"
+ "1: \n\t"
+ "movq (%2, %1), %%mm0 \n\t"
+ "movq (%2, %1), %%mm1 \n\t"
+ "movq 8(%2, %1), %%mm2 \n\t"
+ "movq 8(%2, %1), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm0, %%mm1 \n\t"
+ "paddw %%mm2, %%mm3 \n\t"
+ "paddw %%mm1, %%mm3 \n\t"
+ "paddw %%mm3, %%mm6 \n\t"
+ "add %3, %1 \n\t"
+ " js 1b \n\t"
+ "movq %%mm6, %%mm5 \n\t"
+ "psrlq $32, %%mm6 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "movq %%mm6, %%mm5 \n\t"
+ "psrlq $16, %%mm6 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "movd %%mm6, %0 \n\t"
+ "andl $0xFFFF, %0 \n\t"
+ : "=&r" (sum), "+r" (index)
+ : "r" (pix - index), "r" ((long)line_size)
+ );
+
+ return sum;
+}
+
+static int pix_norm1_mmx(uint8_t *pix, int line_size) {
+ int tmp;
+ asm volatile (
+ "movl $16,%%ecx\n"
+ "pxor %%mm0,%%mm0\n"
+ "pxor %%mm7,%%mm7\n"
+ "1:\n"
+ "movq (%0),%%mm2\n" /* mm2 = pix[0-7] */
+ "movq 8(%0),%%mm3\n" /* mm3 = pix[8-15] */
+
+ "movq %%mm2,%%mm1\n" /* mm1 = mm2 = pix[0-7] */
+
+ "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
+ "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
+
+ "movq %%mm3,%%mm4\n" /* mm4 = mm3 = pix[8-15] */
+ "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
+ "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
+
+ "pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
+ "pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */
+
+ "pmaddwd %%mm3,%%mm3\n"
+ "pmaddwd %%mm4,%%mm4\n"
+
+ "paddd %%mm1,%%mm2\n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
+ pix2^2+pix3^2+pix6^2+pix7^2) */
+ "paddd %%mm3,%%mm4\n"
+ "paddd %%mm2,%%mm7\n"
+
+ "add %2, %0\n"
+ "paddd %%mm4,%%mm7\n"
+ "dec %%ecx\n"
+ "jnz 1b\n"
+
+ "movq %%mm7,%%mm1\n"
+ "psrlq $32, %%mm7\n" /* shift hi dword to lo */
+ "paddd %%mm7,%%mm1\n"
+ "movd %%mm1,%1\n"
+ : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
+ return tmp;
+}
+
+static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+ int tmp;
+ asm volatile (
+ "movl %4,%%ecx\n"
+ "shr $1,%%ecx\n"
+ "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
+ "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
+ "1:\n"
+ "movq (%0),%%mm1\n" /* mm1 = pix1[0][0-7] */
+ "movq (%1),%%mm2\n" /* mm2 = pix2[0][0-7] */
+ "movq (%0,%3),%%mm3\n" /* mm3 = pix1[1][0-7] */
+ "movq (%1,%3),%%mm4\n" /* mm4 = pix2[1][0-7] */
+
+ /* todo: mm1-mm2, mm3-mm4 */
+ /* algo: subtract mm1 from mm2 with saturation and vice versa */
+ /* OR the results to get absolute difference */
+ "movq %%mm1,%%mm5\n"
+ "movq %%mm3,%%mm6\n"
+ "psubusb %%mm2,%%mm1\n"
+ "psubusb %%mm4,%%mm3\n"
+ "psubusb %%mm5,%%mm2\n"
+ "psubusb %%mm6,%%mm4\n"
+
+ "por %%mm1,%%mm2\n"
+ "por %%mm3,%%mm4\n"
+
+ /* now convert to 16-bit vectors so we can square them */
+ "movq %%mm2,%%mm1\n"
+ "movq %%mm4,%%mm3\n"
+
+ "punpckhbw %%mm0,%%mm2\n"
+ "punpckhbw %%mm0,%%mm4\n"
+ "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
+ "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
+
+ "pmaddwd %%mm2,%%mm2\n"
+ "pmaddwd %%mm4,%%mm4\n"
+ "pmaddwd %%mm1,%%mm1\n"
+ "pmaddwd %%mm3,%%mm3\n"
+
+ "lea (%0,%3,2), %0\n" /* pix1 += 2*line_size */
+ "lea (%1,%3,2), %1\n" /* pix2 += 2*line_size */
+
+ "paddd %%mm2,%%mm1\n"
+ "paddd %%mm4,%%mm3\n"
+ "paddd %%mm1,%%mm7\n"
+ "paddd %%mm3,%%mm7\n"
+
+ "decl %%ecx\n"
+ "jnz 1b\n"
+
+ "movq %%mm7,%%mm1\n"
+ "psrlq $32, %%mm7\n" /* shift hi dword to lo */
+ "paddd %%mm7,%%mm1\n"
+ "movd %%mm1,%2\n"
+ : "+r" (pix1), "+r" (pix2), "=r"(tmp)
+ : "r" ((long)line_size) , "m" (h)
+ : "%ecx");
+ return tmp;
+}
+
+static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+ int tmp;
+ asm volatile (
+ "movl %4,%%ecx\n"
+ "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
+ "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
+ "1:\n"
+ "movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
+ "movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
+ "movq 8(%0),%%mm3\n" /* mm3 = pix1[8-15] */
+ "movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */
+
+ /* todo: mm1-mm2, mm3-mm4 */
+ /* algo: subtract mm1 from mm2 with saturation and vice versa */
+ /* OR the results to get absolute difference */
+ "movq %%mm1,%%mm5\n"
+ "movq %%mm3,%%mm6\n"
+ "psubusb %%mm2,%%mm1\n"
+ "psubusb %%mm4,%%mm3\n"
+ "psubusb %%mm5,%%mm2\n"
+ "psubusb %%mm6,%%mm4\n"
+
+ "por %%mm1,%%mm2\n"
+ "por %%mm3,%%mm4\n"
+
+ /* now convert to 16-bit vectors so we can square them */
+ "movq %%mm2,%%mm1\n"
+ "movq %%mm4,%%mm3\n"
+
+ "punpckhbw %%mm0,%%mm2\n"
+ "punpckhbw %%mm0,%%mm4\n"
+ "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
+ "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
+
+ "pmaddwd %%mm2,%%mm2\n"
+ "pmaddwd %%mm4,%%mm4\n"
+ "pmaddwd %%mm1,%%mm1\n"
+ "pmaddwd %%mm3,%%mm3\n"
+
+ "add %3,%0\n"
+ "add %3,%1\n"
+
+ "paddd %%mm2,%%mm1\n"
+ "paddd %%mm4,%%mm3\n"
+ "paddd %%mm1,%%mm7\n"
+ "paddd %%mm3,%%mm7\n"
+
+ "decl %%ecx\n"
+ "jnz 1b\n"
+
+ "movq %%mm7,%%mm1\n"
+ "psrlq $32, %%mm7\n" /* shift hi dword to lo */
+ "paddd %%mm7,%%mm1\n"
+ "movd %%mm1,%2\n"
+ : "+r" (pix1), "+r" (pix2), "=r"(tmp)
+ : "r" ((long)line_size) , "m" (h)
+ : "%ecx");
+ return tmp;
+}
+
+static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+ int tmp;
+ asm volatile (
+ "shr $1,%2\n"
+ "pxor %%xmm0,%%xmm0\n" /* mm0 = 0 */
+ "pxor %%xmm7,%%xmm7\n" /* mm7 holds the sum */
+ "1:\n"
+ "movdqu (%0),%%xmm1\n" /* mm1 = pix1[0][0-15] */
+ "movdqu (%1),%%xmm2\n" /* mm2 = pix2[0][0-15] */
+ "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */
+ "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */
+
+ /* todo: mm1-mm2, mm3-mm4 */
+ /* algo: subtract mm1 from mm2 with saturation and vice versa */
+ /* OR the results to get absolute difference */
+ "movdqa %%xmm1,%%xmm5\n"
+ "movdqa %%xmm3,%%xmm6\n"
+ "psubusb %%xmm2,%%xmm1\n"
+ "psubusb %%xmm4,%%xmm3\n"
+ "psubusb %%xmm5,%%xmm2\n"
+ "psubusb %%xmm6,%%xmm4\n"
+
+ "por %%xmm1,%%xmm2\n"
+ "por %%xmm3,%%xmm4\n"
+
+ /* now convert to 16-bit vectors so we can square them */
+ "movdqa %%xmm2,%%xmm1\n"
+ "movdqa %%xmm4,%%xmm3\n"
+
+ "punpckhbw %%xmm0,%%xmm2\n"
+ "punpckhbw %%xmm0,%%xmm4\n"
+ "punpcklbw %%xmm0,%%xmm1\n" /* mm1 now spread over (mm1,mm2) */
+ "punpcklbw %%xmm0,%%xmm3\n" /* mm4 now spread over (mm3,mm4) */
+
+ "pmaddwd %%xmm2,%%xmm2\n"
+ "pmaddwd %%xmm4,%%xmm4\n"
+ "pmaddwd %%xmm1,%%xmm1\n"
+ "pmaddwd %%xmm3,%%xmm3\n"
+
+ "lea (%0,%4,2), %0\n" /* pix1 += 2*line_size */
+ "lea (%1,%4,2), %1\n" /* pix2 += 2*line_size */
+
+ "paddd %%xmm2,%%xmm1\n"
+ "paddd %%xmm4,%%xmm3\n"
+ "paddd %%xmm1,%%xmm7\n"
+ "paddd %%xmm3,%%xmm7\n"
+
+ "decl %2\n"
+ "jnz 1b\n"
+
+ "movdqa %%xmm7,%%xmm1\n"
+ "psrldq $8, %%xmm7\n" /* shift hi qword to lo */
+ "paddd %%xmm1,%%xmm7\n"
+ "movdqa %%xmm7,%%xmm1\n"
+ "psrldq $4, %%xmm7\n" /* shift hi dword to lo */
+ "paddd %%xmm1,%%xmm7\n"
+ "movd %%xmm7,%3\n"
+ : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
+ : "r" ((long)line_size));
+ return tmp;
+}
+
+static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
+ int tmp;
+ asm volatile (
+ "movl %3,%%ecx\n"
+ "pxor %%mm7,%%mm7\n"
+ "pxor %%mm6,%%mm6\n"
+
+ "movq (%0),%%mm0\n"
+ "movq %%mm0, %%mm1\n"
+ "psllq $8, %%mm0\n"
+ "psrlq $8, %%mm1\n"
+ "psrlq $8, %%mm0\n"
+ "movq %%mm0, %%mm2\n"
+ "movq %%mm1, %%mm3\n"
+ "punpcklbw %%mm7,%%mm0\n"
+ "punpcklbw %%mm7,%%mm1\n"
+ "punpckhbw %%mm7,%%mm2\n"
+ "punpckhbw %%mm7,%%mm3\n"
+ "psubw %%mm1, %%mm0\n"
+ "psubw %%mm3, %%mm2\n"
+
+ "add %2,%0\n"
+
+ "movq (%0),%%mm4\n"
+ "movq %%mm4, %%mm1\n"
+ "psllq $8, %%mm4\n"
+ "psrlq $8, %%mm1\n"
+ "psrlq $8, %%mm4\n"
+ "movq %%mm4, %%mm5\n"
+ "movq %%mm1, %%mm3\n"
+ "punpcklbw %%mm7,%%mm4\n"
+ "punpcklbw %%mm7,%%mm1\n"
+ "punpckhbw %%mm7,%%mm5\n"
+ "punpckhbw %%mm7,%%mm3\n"
+ "psubw %%mm1, %%mm4\n"
+ "psubw %%mm3, %%mm5\n"
+ "psubw %%mm4, %%mm0\n"
+ "psubw %%mm5, %%mm2\n"
+ "pxor %%mm3, %%mm3\n"
+ "pxor %%mm1, %%mm1\n"
+ "pcmpgtw %%mm0, %%mm3\n\t"
+ "pcmpgtw %%mm2, %%mm1\n\t"
+ "pxor %%mm3, %%mm0\n"
+ "pxor %%mm1, %%mm2\n"
+ "psubw %%mm3, %%mm0\n"
+ "psubw %%mm1, %%mm2\n"
+ "paddw %%mm0, %%mm2\n"
+ "paddw %%mm2, %%mm6\n"
+
+ "add %2,%0\n"
+ "1:\n"
+
+ "movq (%0),%%mm0\n"
+ "movq %%mm0, %%mm1\n"
+ "psllq $8, %%mm0\n"
+ "psrlq $8, %%mm1\n"
+ "psrlq $8, %%mm0\n"
+ "movq %%mm0, %%mm2\n"
+ "movq %%mm1, %%mm3\n"
+ "punpcklbw %%mm7,%%mm0\n"
+ "punpcklbw %%mm7,%%mm1\n"
+ "punpckhbw %%mm7,%%mm2\n"
+ "punpckhbw %%mm7,%%mm3\n"
+ "psubw %%mm1, %%mm0\n"
+ "psubw %%mm3, %%mm2\n"
+ "psubw %%mm0, %%mm4\n"
+ "psubw %%mm2, %%mm5\n"
+ "pxor %%mm3, %%mm3\n"
+ "pxor %%mm1, %%mm1\n"
+ "pcmpgtw %%mm4, %%mm3\n\t"
+ "pcmpgtw %%mm5, %%mm1\n\t"
+ "pxor %%mm3, %%mm4\n"
+ "pxor %%mm1, %%mm5\n"
+ "psubw %%mm3, %%mm4\n"
+ "psubw %%mm1, %%mm5\n"
+ "paddw %%mm4, %%mm5\n"
+ "paddw %%mm5, %%mm6\n"
+
+ "add %2,%0\n"
+
+ "movq (%0),%%mm4\n"
+ "movq %%mm4, %%mm1\n"
+ "psllq $8, %%mm4\n"
+ "psrlq $8, %%mm1\n"
+ "psrlq $8, %%mm4\n"
+ "movq %%mm4, %%mm5\n"
+ "movq %%mm1, %%mm3\n"
+ "punpcklbw %%mm7,%%mm4\n"
+ "punpcklbw %%mm7,%%mm1\n"
+ "punpckhbw %%mm7,%%mm5\n"
+ "punpckhbw %%mm7,%%mm3\n"
+ "psubw %%mm1, %%mm4\n"
+ "psubw %%mm3, %%mm5\n"
+ "psubw %%mm4, %%mm0\n"
+ "psubw %%mm5, %%mm2\n"
+ "pxor %%mm3, %%mm3\n"
+ "pxor %%mm1, %%mm1\n"
+ "pcmpgtw %%mm0, %%mm3\n\t"
+ "pcmpgtw %%mm2, %%mm1\n\t"
+ "pxor %%mm3, %%mm0\n"
+ "pxor %%mm1, %%mm2\n"
+ "psubw %%mm3, %%mm0\n"
+ "psubw %%mm1, %%mm2\n"
+ "paddw %%mm0, %%mm2\n"
+ "paddw %%mm2, %%mm6\n"
+
+ "add %2,%0\n"
+ "subl $2, %%ecx\n"
+ " jnz 1b\n"
+
+ "movq %%mm6, %%mm0\n"
+ "punpcklwd %%mm7,%%mm0\n"
+ "punpckhwd %%mm7,%%mm6\n"
+ "paddd %%mm0, %%mm6\n"
+
+ "movq %%mm6,%%mm0\n"
+ "psrlq $32, %%mm6\n"
+ "paddd %%mm6,%%mm0\n"
+ "movd %%mm0,%1\n"
+ : "+r" (pix1), "=r"(tmp)
+ : "r" ((long)line_size) , "g" (h-2)
+ : "%ecx");
+ return tmp;
+}
+
+static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
+ int tmp;
+ uint8_t * pix= pix1;
+ asm volatile (
+ "movl %3,%%ecx\n"
+ "pxor %%mm7,%%mm7\n"
+ "pxor %%mm6,%%mm6\n"
+
+ "movq (%0),%%mm0\n"
+ "movq 1(%0),%%mm1\n"
+ "movq %%mm0, %%mm2\n"
+ "movq %%mm1, %%mm3\n"
+ "punpcklbw %%mm7,%%mm0\n"
+ "punpcklbw %%mm7,%%mm1\n"
+ "punpckhbw %%mm7,%%mm2\n"
+ "punpckhbw %%mm7,%%mm3\n"
+ "psubw %%mm1, %%mm0\n"
+ "psubw %%mm3, %%mm2\n"
+
+ "add %2,%0\n"
+
+ "movq (%0),%%mm4\n"
+ "movq 1(%0),%%mm1\n"
+ "movq %%mm4, %%mm5\n"
+ "movq %%mm1, %%mm3\n"
+ "punpcklbw %%mm7,%%mm4\n"
+ "punpcklbw %%mm7,%%mm1\n"
+ "punpckhbw %%mm7,%%mm5\n"
+ "punpckhbw %%mm7,%%mm3\n"
+ "psubw %%mm1, %%mm4\n"
+ "psubw %%mm3, %%mm5\n"
+ "psubw %%mm4, %%mm0\n"
+ "psubw %%mm5, %%mm2\n"
+ "pxor %%mm3, %%mm3\n"
+ "pxor %%mm1, %%mm1\n"
+ "pcmpgtw %%mm0, %%mm3\n\t"
+ "pcmpgtw %%mm2, %%mm1\n\t"
+ "pxor %%mm3, %%mm0\n"
+ "pxor %%mm1, %%mm2\n"
+ "psubw %%mm3, %%mm0\n"
+ "psubw %%mm1, %%mm2\n"
+ "paddw %%mm0, %%mm2\n"
+ "paddw %%mm2, %%mm6\n"
+
+ "add %2,%0\n"
+ "1:\n"
+
+ "movq (%0),%%mm0\n"
+ "movq 1(%0),%%mm1\n"
+ "movq %%mm0, %%mm2\n"
+ "movq %%mm1, %%mm3\n"
+ "punpcklbw %%mm7,%%mm0\n"
+ "punpcklbw %%mm7,%%mm1\n"
+ "punpckhbw %%mm7,%%mm2\n"
+ "punpckhbw %%mm7,%%mm3\n"
+ "psubw %%mm1, %%mm0\n"
+ "psubw %%mm3, %%mm2\n"
+ "psubw %%mm0, %%mm4\n"
+ "psubw %%mm2, %%mm5\n"
+ "pxor %%mm3, %%mm3\n"
+ "pxor %%mm1, %%mm1\n"
+ "pcmpgtw %%mm4, %%mm3\n\t"
+ "pcmpgtw %%mm5, %%mm1\n\t"
+ "pxor %%mm3, %%mm4\n"
+ "pxor %%mm1, %%mm5\n"
+ "psubw %%mm3, %%mm4\n"
+ "psubw %%mm1, %%mm5\n"
+ "paddw %%mm4, %%mm5\n"
+ "paddw %%mm5, %%mm6\n"
+
+ "add %2,%0\n"
+
+ "movq (%0),%%mm4\n"
+ "movq 1(%0),%%mm1\n"
+ "movq %%mm4, %%mm5\n"
+ "movq %%mm1, %%mm3\n"
+ "punpcklbw %%mm7,%%mm4\n"
+ "punpcklbw %%mm7,%%mm1\n"
+ "punpckhbw %%mm7,%%mm5\n"
+ "punpckhbw %%mm7,%%mm3\n"
+ "psubw %%mm1, %%mm4\n"
+ "psubw %%mm3, %%mm5\n"
+ "psubw %%mm4, %%mm0\n"
+ "psubw %%mm5, %%mm2\n"
+ "pxor %%mm3, %%mm3\n"
+ "pxor %%mm1, %%mm1\n"
+ "pcmpgtw %%mm0, %%mm3\n\t"
+ "pcmpgtw %%mm2, %%mm1\n\t"
+ "pxor %%mm3, %%mm0\n"
+ "pxor %%mm1, %%mm2\n"
+ "psubw %%mm3, %%mm0\n"
+ "psubw %%mm1, %%mm2\n"
+ "paddw %%mm0, %%mm2\n"
+ "paddw %%mm2, %%mm6\n"
+
+ "add %2,%0\n"
+ "subl $2, %%ecx\n"
+ " jnz 1b\n"
+
+ "movq %%mm6, %%mm0\n"
+ "punpcklwd %%mm7,%%mm0\n"
+ "punpckhwd %%mm7,%%mm6\n"
+ "paddd %%mm0, %%mm6\n"
+
+ "movq %%mm6,%%mm0\n"
+ "psrlq $32, %%mm6\n"
+ "paddd %%mm6,%%mm0\n"
+ "movd %%mm0,%1\n"
+ : "+r" (pix1), "=r"(tmp)
+ : "r" ((long)line_size) , "g" (h-2)
+ : "%ecx");
+ return tmp + hf_noise8_mmx(pix+8, line_size, h);
+}
+
+static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+ MpegEncContext *c = p;
+ int score1, score2;
+
+ if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
+ else score1 = sse16_mmx(c, pix1, pix2, line_size, h);
+ score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
+
+ if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
+ else return score1 + FFABS(score2)*8;
+}
+
+static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+ MpegEncContext *c = p;
+ int score1= sse8_mmx(c, pix1, pix2, line_size, h);
+ int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
+
+ if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
+ else return score1 + FFABS(score2)*8;
+}
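
In both NSSE variants the final score is the plain SSE of the two blocks plus a weighted penalty for how much their high-frequency noise estimates differ: score = sse + |hf_noise(pix1) - hf_noise(pix2)| * nsse_weight, where the weight comes from avctx->nsse_weight when an encoder context is available and defaults to 8 otherwise.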
+
+static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
+ int tmp;
+
+ assert( (((int)pix) & 7) == 0);
+ assert((line_size &7) ==0);
+
+#define SUM(in0, in1, out0, out1) \
+ "movq (%0), %%mm2\n"\
+ "movq 8(%0), %%mm3\n"\
+ "add %2,%0\n"\
+ "movq %%mm2, " #out0 "\n"\
+ "movq %%mm3, " #out1 "\n"\
+ "psubusb " #in0 ", %%mm2\n"\
+ "psubusb " #in1 ", %%mm3\n"\
+ "psubusb " #out0 ", " #in0 "\n"\
+ "psubusb " #out1 ", " #in1 "\n"\
+ "por %%mm2, " #in0 "\n"\
+ "por %%mm3, " #in1 "\n"\
+ "movq " #in0 ", %%mm2\n"\
+ "movq " #in1 ", %%mm3\n"\
+ "punpcklbw %%mm7, " #in0 "\n"\
+ "punpcklbw %%mm7, " #in1 "\n"\
+ "punpckhbw %%mm7, %%mm2\n"\
+ "punpckhbw %%mm7, %%mm3\n"\
+ "paddw " #in1 ", " #in0 "\n"\
+ "paddw %%mm3, %%mm2\n"\
+ "paddw %%mm2, " #in0 "\n"\
+ "paddw " #in0 ", %%mm6\n"
+
+
+ asm volatile (
+ "movl %3,%%ecx\n"
+ "pxor %%mm6,%%mm6\n"
+ "pxor %%mm7,%%mm7\n"
+ "movq (%0),%%mm0\n"
+ "movq 8(%0),%%mm1\n"
+ "add %2,%0\n"
+ "subl $2, %%ecx\n"
+ SUM(%%mm0, %%mm1, %%mm4, %%mm5)
+ "1:\n"
+
+ SUM(%%mm4, %%mm5, %%mm0, %%mm1)
+
+ SUM(%%mm0, %%mm1, %%mm4, %%mm5)
+
+ "subl $2, %%ecx\n"
+ "jnz 1b\n"
+
+ "movq %%mm6,%%mm0\n"
+ "psrlq $32, %%mm6\n"
+ "paddw %%mm6,%%mm0\n"
+ "movq %%mm0,%%mm6\n"
+ "psrlq $16, %%mm0\n"
+ "paddw %%mm6,%%mm0\n"
+ "movd %%mm0,%1\n"
+ : "+r" (pix), "=r"(tmp)
+ : "r" ((long)line_size) , "m" (h)
+ : "%ecx");
+ return tmp & 0xFFFF;
+}
+#undef SUM
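
The intra vertical SAD above reduces to a short scalar loop; the following is a hypothetical reference that mirrors the 16-bit accumulation and final mask of the MMX code:

#include <stdint.h>
#include <stdlib.h>

/* hypothetical scalar reference for vsad_intra16 */
static int vsad_intra16_ref(const uint8_t *pix, int line_size, int h)
{
    int x, y, sum = 0;
    for (y = 1; y < h; y++)
        for (x = 0; x < 16; x++)
            sum += abs(pix[y * line_size + x] - pix[(y - 1) * line_size + x]);
    return sum & 0xFFFF;   /* the MMX version accumulates modulo 2^16 */
}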
+
+static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
+ int tmp;
+
+ assert( (((int)pix) & 7) == 0);
+ assert((line_size &7) ==0);
+
+#define SUM(in0, in1, out0, out1) \
+ "movq (%0), " #out0 "\n"\
+ "movq 8(%0), " #out1 "\n"\
+ "add %2,%0\n"\
+ "psadbw " #out0 ", " #in0 "\n"\
+ "psadbw " #out1 ", " #in1 "\n"\
+ "paddw " #in1 ", " #in0 "\n"\
+ "paddw " #in0 ", %%mm6\n"
+
+ asm volatile (
+ "movl %3,%%ecx\n"
+ "pxor %%mm6,%%mm6\n"
+ "pxor %%mm7,%%mm7\n"
+ "movq (%0),%%mm0\n"
+ "movq 8(%0),%%mm1\n"
+ "add %2,%0\n"
+ "subl $2, %%ecx\n"
+ SUM(%%mm0, %%mm1, %%mm4, %%mm5)
+ "1:\n"
+
+ SUM(%%mm4, %%mm5, %%mm0, %%mm1)
+
+ SUM(%%mm0, %%mm1, %%mm4, %%mm5)
+
+ "subl $2, %%ecx\n"
+ "jnz 1b\n"
+
+ "movd %%mm6,%1\n"
+ : "+r" (pix), "=r"(tmp)
+ : "r" ((long)line_size) , "m" (h)
+ : "%ecx");
+ return tmp;
+}
+#undef SUM
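
The MMX2 variant is identical in spirit but uses psadbw, which yields the sum of absolute differences of eight byte pairs in a single instruction, so the unpack/compare/subtract/or sequence of the plain MMX SUM macro collapses to one operation per eight pixels and no final mask is needed.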
+
+static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+ int tmp;
+
+ assert( (((int)pix1) & 7) == 0);
+ assert( (((int)pix2) & 7) == 0);
+ assert((line_size &7) ==0);
+
+#define SUM(in0, in1, out0, out1) \
+ "movq (%0),%%mm2\n"\
+ "movq (%1)," #out0 "\n"\
+ "movq 8(%0),%%mm3\n"\
+ "movq 8(%1)," #out1 "\n"\
+ "add %3,%0\n"\
+ "add %3,%1\n"\
+ "psubb " #out0 ", %%mm2\n"\
+ "psubb " #out1 ", %%mm3\n"\
+ "pxor %%mm7, %%mm2\n"\
+ "pxor %%mm7, %%mm3\n"\
+ "movq %%mm2, " #out0 "\n"\
+ "movq %%mm3, " #out1 "\n"\
+ "psubusb " #in0 ", %%mm2\n"\
+ "psubusb " #in1 ", %%mm3\n"\
+ "psubusb " #out0 ", " #in0 "\n"\
+ "psubusb " #out1 ", " #in1 "\n"\
+ "por %%mm2, " #in0 "\n"\
+ "por %%mm3, " #in1 "\n"\
+ "movq " #in0 ", %%mm2\n"\
+ "movq " #in1 ", %%mm3\n"\
+ "punpcklbw %%mm7, " #in0 "\n"\
+ "punpcklbw %%mm7, " #in1 "\n"\
+ "punpckhbw %%mm7, %%mm2\n"\
+ "punpckhbw %%mm7, %%mm3\n"\
+ "paddw " #in1 ", " #in0 "\n"\
+ "paddw %%mm3, %%mm2\n"\
+ "paddw %%mm2, " #in0 "\n"\
+ "paddw " #in0 ", %%mm6\n"
+
+
+ asm volatile (
+ "movl %4,%%ecx\n"
+ "pxor %%mm6,%%mm6\n"
+ "pcmpeqw %%mm7,%%mm7\n"
+ "psllw $15, %%mm7\n"
+ "packsswb %%mm7, %%mm7\n"
+ "movq (%0),%%mm0\n"
+ "movq (%1),%%mm2\n"
+ "movq 8(%0),%%mm1\n"
+ "movq 8(%1),%%mm3\n"
+ "add %3,%0\n"
+ "add %3,%1\n"
+ "subl $2, %%ecx\n"
+ "psubb %%mm2, %%mm0\n"
+ "psubb %%mm3, %%mm1\n"
+ "pxor %%mm7, %%mm0\n"
+ "pxor %%mm7, %%mm1\n"
+ SUM(%%mm0, %%mm1, %%mm4, %%mm5)
+ "1:\n"
+
+ SUM(%%mm4, %%mm5, %%mm0, %%mm1)
+
+ SUM(%%mm0, %%mm1, %%mm4, %%mm5)
+
+ "subl $2, %%ecx\n"
+ "jnz 1b\n"
+
+ "movq %%mm6,%%mm0\n"
+ "psrlq $32, %%mm6\n"
+ "paddw %%mm6,%%mm0\n"
+ "movq %%mm0,%%mm6\n"
+ "psrlq $16, %%mm0\n"
+ "paddw %%mm6,%%mm0\n"
+ "movd %%mm0,%2\n"
+ : "+r" (pix1), "+r" (pix2), "=r"(tmp)
+ : "r" ((long)line_size) , "m" (h)
+ : "%ecx");
+ return tmp & 0x7FFF;
+}
+#undef SUM
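
For the inter vertical SAD, mm7 is first filled with 0x80 in every byte (pcmpeqw/psllw/packsswb); after psubb, the per-byte difference pix1 - pix2 is re-biased by XORing with 0x80, which maps the signed byte difference to an unsigned representation so the unsigned-saturation absolute-difference arithmetic that follows behaves correctly. Ignoring the modulo-256 wrap of that packed byte arithmetic, the computation is roughly the hypothetical scalar sketch below:

#include <stdint.h>
#include <stdlib.h>

/* hypothetical scalar reference for vsad16 (ignores the 8-bit wrap of the
 * packed byte differences in the MMX version) */
static int vsad16_ref(const uint8_t *pix1, const uint8_t *pix2, int line_size, int h)
{
    int x, y, sum = 0;
    for (y = 1; y < h; y++)
        for (x = 0; x < 16; x++) {
            int d0 = pix1[(y - 1) * line_size + x] - pix2[(y - 1) * line_size + x];
            int d1 = pix1[ y      * line_size + x] - pix2[ y      * line_size + x];
            sum += abs(d1 - d0);
        }
    return sum & 0x7FFF;
}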
+
+static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+ int tmp;
+
+ assert( (((int)pix1) & 7) == 0);
+ assert( (((int)pix2) & 7) == 0);
+ assert((line_size &7) ==0);
+
+#define SUM(in0, in1, out0, out1) \
+ "movq (%0)," #out0 "\n"\
+ "movq (%1),%%mm2\n"\
+ "movq 8(%0)," #out1 "\n"\
+ "movq 8(%1),%%mm3\n"\
+ "add %3,%0\n"\
+ "add %3,%1\n"\
+ "psubb %%mm2, " #out0 "\n"\
+ "psubb %%mm3, " #out1 "\n"\
+ "pxor %%mm7, " #out0 "\n"\
+ "pxor %%mm7, " #out1 "\n"\
+ "psadbw " #out0 ", " #in0 "\n"\
+ "psadbw " #out1 ", " #in1 "\n"\
+ "paddw " #in1 ", " #in0 "\n"\
+ "paddw " #in0 ", %%mm6\n"
+
+ asm volatile (
+ "movl %4,%%ecx\n"
+ "pxor %%mm6,%%mm6\n"
+ "pcmpeqw %%mm7,%%mm7\n"
+ "psllw $15, %%mm7\n"
+ "packsswb %%mm7, %%mm7\n"
+ "movq (%0),%%mm0\n"
+ "movq (%1),%%mm2\n"
+ "movq 8(%0),%%mm1\n"
+ "movq 8(%1),%%mm3\n"
+ "add %3,%0\n"
+ "add %3,%1\n"
+ "subl $2, %%ecx\n"
+ "psubb %%mm2, %%mm0\n"
+ "psubb %%mm3, %%mm1\n"
+ "pxor %%mm7, %%mm0\n"
+ "pxor %%mm7, %%mm1\n"
+ SUM(%%mm0, %%mm1, %%mm4, %%mm5)
+ "1:\n"
+
+ SUM(%%mm4, %%mm5, %%mm0, %%mm1)
+
+ SUM(%%mm0, %%mm1, %%mm4, %%mm5)
+
+ "subl $2, %%ecx\n"
+ "jnz 1b\n"
+
+ "movd %%mm6,%2\n"
+ : "+r" (pix1), "+r" (pix2), "=r"(tmp)
+ : "r" ((long)line_size) , "m" (h)
+ : "%ecx");
+ return tmp;
+}
+#undef SUM
+
+static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
+ long i=0;
+ asm volatile(
+ "1: \n\t"
+ "movq (%2, %0), %%mm0 \n\t"
+ "movq (%1, %0), %%mm1 \n\t"
+ "psubb %%mm0, %%mm1 \n\t"
+ "movq %%mm1, (%3, %0) \n\t"
+ "movq 8(%2, %0), %%mm0 \n\t"
+ "movq 8(%1, %0), %%mm1 \n\t"
+ "psubb %%mm0, %%mm1 \n\t"
+ "movq %%mm1, 8(%3, %0) \n\t"
+ "add $16, %0 \n\t"
+ "cmp %4, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (i)
+ : "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
+ );
+ for(; i<w; i++)
+ dst[i+0] = src1[i+0]-src2[i+0];
+}
+
+static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
+ long i=0;
+ uint8_t l, lt;
+
+ asm volatile(
+ "1: \n\t"
+ "movq -1(%1, %0), %%mm0 \n\t" // LT
+ "movq (%1, %0), %%mm1 \n\t" // T
+ "movq -1(%2, %0), %%mm2 \n\t" // L
+ "movq (%2, %0), %%mm3 \n\t" // X
+ "movq %%mm2, %%mm4 \n\t" // L
+ "psubb %%mm0, %%mm2 \n\t"
+ "paddb %%mm1, %%mm2 \n\t" // L + T - LT
+ "movq %%mm4, %%mm5 \n\t" // L
+ "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
+ "pminub %%mm5, %%mm1 \n\t" // min(T, L)
+ "pminub %%mm2, %%mm4 \n\t"
+ "pmaxub %%mm1, %%mm4 \n\t"
+ "psubb %%mm4, %%mm3 \n\t" // dst - pred
+ "movq %%mm3, (%3, %0) \n\t"
+ "add $8, %0 \n\t"
+ "cmp %4, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (i)
+ : "r"(src1), "r"(src2), "r"(dst), "r"((long)w)
+ );
+
+ l= *left;
+ lt= *left_top;
+
+ dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);
+
+ *left_top= src1[w-1];
+ *left = src2[w-1];
+}
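
The left/top/gradient predictor built above with pmaxub/pminub is the usual HuffYUV median predictor. A hypothetical scalar sketch, glossing over the modulo-256 byte arithmetic of the SIMD code and the dst[0] fix-up the function performs with *left and *left_top:

#include <stdint.h>

/* hypothetical scalar sketch of the median prediction subtraction:
 * src1 = previous line (top), src2 = current line */
static void sub_hfyu_median_prediction_ref(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int w)
{
    int i;
    for (i = 1; i < w; i++) {
        int l    = src2[i - 1];            /* L  */
        int t    = src1[i];                /* T  */
        int lt   = src1[i - 1];            /* LT */
        int g    = l + t - lt;
        int mn   = l < t ? l : t;
        int mx   = l < t ? t : l;
        int pred = g < mn ? mn : (g > mx ? mx : g);
        dst[i] = src2[i] - pred;           /* X - median(L, T, L + T - LT) */
    }
}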
+
+#define DIFF_PIXELS_1(m,a,t,p1,p2)\
+ "mov"#m" "#p1", "#a" \n\t"\
+ "mov"#m" "#p2", "#t" \n\t"\
+ "punpcklbw "#a", "#t" \n\t"\
+ "punpcklbw "#a", "#a" \n\t"\
+ "psubw "#t", "#a" \n\t"\
+
+#define DIFF_PIXELS_8(m0,m1,mm,p1,p2,stride,temp) {\
+ uint8_t *p1b=p1, *p2b=p2;\
+ asm volatile(\
+ DIFF_PIXELS_1(m0, mm##0, mm##7, (%1), (%2))\
+ DIFF_PIXELS_1(m0, mm##1, mm##7, (%1,%3), (%2,%3))\
+ DIFF_PIXELS_1(m0, mm##2, mm##7, (%1,%3,2), (%2,%3,2))\
+ "add %4, %1 \n\t"\
+ "add %4, %2 \n\t"\
+ DIFF_PIXELS_1(m0, mm##3, mm##7, (%1), (%2))\
+ DIFF_PIXELS_1(m0, mm##4, mm##7, (%1,%3), (%2,%3))\
+ DIFF_PIXELS_1(m0, mm##5, mm##7, (%1,%3,2), (%2,%3,2))\
+ DIFF_PIXELS_1(m0, mm##6, mm##7, (%1,%4), (%2,%4))\
+ "mov"#m1" "#mm"0, %0 \n\t"\
+ DIFF_PIXELS_1(m0, mm##7, mm##0, (%1,%3,4), (%2,%3,4))\
+ "mov"#m1" %0, "#mm"0 \n\t"\
+ : "+m"(temp), "+r"(p1b), "+r"(p2b)\
+ : "r"((long)stride), "r"((long)stride*3)\
+ );\
+}
+ //the "+m"(temp) is needed as gcc 2.95 sometimes fails to compile "=m"(temp)
+ //the "+m"(temp) is needed because gcc 2.95 sometimes fails to compile it with "=m"(temp)
+
+#define DIFF_PIXELS_4x8(p1,p2,stride,temp) DIFF_PIXELS_8(d, q, %%mm, p1, p2, stride, temp)
+#define DIFF_PIXELS_8x8(p1,p2,stride,temp) DIFF_PIXELS_8(q, dqa, %%xmm, p1, p2, stride, temp)
+
+#define LBUTTERFLY2(a1,b1,a2,b2)\
+ "paddw " #b1 ", " #a1 " \n\t"\
+ "paddw " #b2 ", " #a2 " \n\t"\
+ "paddw " #b1 ", " #b1 " \n\t"\
+ "paddw " #b2 ", " #b2 " \n\t"\
+ "psubw " #a1 ", " #b1 " \n\t"\
+ "psubw " #a2 ", " #b2 " \n\t"
+
+#define HADAMARD8(m0, m1, m2, m3, m4, m5, m6, m7)\
+ LBUTTERFLY2(m0, m1, m2, m3)\
+ LBUTTERFLY2(m4, m5, m6, m7)\
+ LBUTTERFLY2(m0, m2, m1, m3)\
+ LBUTTERFLY2(m4, m6, m5, m7)\
+ LBUTTERFLY2(m0, m4, m1, m5)\
+ LBUTTERFLY2(m2, m6, m3, m7)\
+
+#define HADAMARD48 HADAMARD8(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm6, %%mm7)
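
LBUTTERFLY2 turns each register pair (a, b) into (a + b, b - a), so HADAMARD8, three rounds of butterflies over eight registers, amounts to an 8-point Hadamard transform applied to every SIMD lane at once (up to signs and output ordering, which do not matter for the absolute-value sums taken later). A scalar 1-D sketch, for orientation:

/* scalar 8-point Hadamard transform, the 1-D analogue of HADAMARD8
 * (a sketch; sign and ordering conventions differ from the macro) */
static void hadamard8_1d(int x[8])
{
    int step, i, j;
    for (step = 1; step < 8; step <<= 1)
        for (i = 0; i < 8; i += 2 * step)
            for (j = i; j < i + step; j++) {
                int a = x[j], b = x[j + step];
                x[j]        = a + b;
                x[j + step] = a - b;
            }
}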
+
+#define MMABS_MMX(a,z)\
+ "pxor " #z ", " #z " \n\t"\
+ "pcmpgtw " #a ", " #z " \n\t"\
+ "pxor " #z ", " #a " \n\t"\
+ "psubw " #z ", " #a " \n\t"
+
+#define MMABS_MMX2(a,z)\
+ "pxor " #z ", " #z " \n\t"\
+ "psubw " #a ", " #z " \n\t"\
+ "pmaxsw " #z ", " #a " \n\t"
+
+#define MMABS_SSSE3(a,z)\
+ "pabsw " #a ", " #a " \n\t"
+
+#define MMABS_SUM(a,z, sum)\
+ MMABS(a,z)\
+ "paddusw " #a ", " #sum " \n\t"
+
+#define MMABS_SUM_8x8_NOSPILL\
+ MMABS(%%xmm0, %%xmm8)\
+ MMABS(%%xmm1, %%xmm9)\
+ MMABS_SUM(%%xmm2, %%xmm8, %%xmm0)\
+ MMABS_SUM(%%xmm3, %%xmm9, %%xmm1)\
+ MMABS_SUM(%%xmm4, %%xmm8, %%xmm0)\
+ MMABS_SUM(%%xmm5, %%xmm9, %%xmm1)\
+ MMABS_SUM(%%xmm6, %%xmm8, %%xmm0)\
+ MMABS_SUM(%%xmm7, %%xmm9, %%xmm1)\
+ "paddusw %%xmm1, %%xmm0 \n\t"
+
+#ifdef ARCH_X86_64
+#define MMABS_SUM_8x8_SSE2 MMABS_SUM_8x8_NOSPILL
+#else
+#define MMABS_SUM_8x8_SSE2\
+ "movdqa %%xmm7, (%1) \n\t"\
+ MMABS(%%xmm0, %%xmm7)\
+ MMABS(%%xmm1, %%xmm7)\
+ MMABS_SUM(%%xmm2, %%xmm7, %%xmm0)\
+ MMABS_SUM(%%xmm3, %%xmm7, %%xmm1)\
+ MMABS_SUM(%%xmm4, %%xmm7, %%xmm0)\
+ MMABS_SUM(%%xmm5, %%xmm7, %%xmm1)\
+ MMABS_SUM(%%xmm6, %%xmm7, %%xmm0)\
+ "movdqa (%1), %%xmm2 \n\t"\
+ MMABS_SUM(%%xmm2, %%xmm7, %%xmm1)\
+ "paddusw %%xmm1, %%xmm0 \n\t"
+#endif
+
+#define LOAD4(o, a, b, c, d)\
+ "movq "#o"(%1), "#a" \n\t"\
+ "movq "#o"+8(%1), "#b" \n\t"\
+ "movq "#o"+16(%1), "#c" \n\t"\
+ "movq "#o"+24(%1), "#d" \n\t"\
+
+#define STORE4(o, a, b, c, d)\
+ "movq "#a", "#o"(%1) \n\t"\
+ "movq "#b", "#o"+8(%1) \n\t"\
+ "movq "#c", "#o"+16(%1) \n\t"\
+ "movq "#d", "#o"+24(%1) \n\t"\
+
+/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to
+ * about 100k on extreme inputs. But that's very unlikely to occur in natural video,
+ * and it's even more unlikely to not have any alternative mvs/modes with lower cost. */
+#define HSUM_MMX(a, t, dst)\
+ "movq "#a", "#t" \n\t"\
+ "psrlq $32, "#a" \n\t"\
+ "paddusw "#t", "#a" \n\t"\
+ "movq "#a", "#t" \n\t"\
+ "psrlq $16, "#a" \n\t"\
+ "paddusw "#t", "#a" \n\t"\
+ "movd "#a", "#dst" \n\t"\
+
+#define HSUM_MMX2(a, t, dst)\
+ "pshufw $0x0E, "#a", "#t" \n\t"\
+ "paddusw "#t", "#a" \n\t"\
+ "pshufw $0x01, "#a", "#t" \n\t"\
+ "paddusw "#t", "#a" \n\t"\
+ "movd "#a", "#dst" \n\t"\
+
+#define HSUM_SSE2(a, t, dst)\
+ "movhlps "#a", "#t" \n\t"\
+ "paddusw "#t", "#a" \n\t"\
+ "pshuflw $0x0E, "#a", "#t" \n\t"\
+ "paddusw "#t", "#a" \n\t"\
+ "pshuflw $0x01, "#a", "#t" \n\t"\
+ "paddusw "#t", "#a" \n\t"\
+ "movd "#a", "#dst" \n\t"\
+
+#define HADAMARD8_DIFF_MMX(cpu) \
+static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
+ DECLARE_ALIGNED_8(uint64_t, temp[13]);\
+ int sum;\
+\
+ assert(h==8);\
+\
+ DIFF_PIXELS_4x8(src1, src2, stride, temp[0]);\
+\
+ asm volatile(\
+ HADAMARD48\
+\
+ "movq %%mm7, 96(%1) \n\t"\
+\
+ TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
+ STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)\
+\
+ "movq 96(%1), %%mm7 \n\t"\
+ TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
+ STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)\
+\
+ : "=r" (sum)\
+ : "r"(temp)\
+ );\
+\
+ DIFF_PIXELS_4x8(src1+4, src2+4, stride, temp[4]);\
+\
+ asm volatile(\
+ HADAMARD48\
+\
+ "movq %%mm7, 96(%1) \n\t"\
+\
+ TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
+ STORE4(32, %%mm0, %%mm3, %%mm7, %%mm2)\
+\
+ "movq 96(%1), %%mm7 \n\t"\
+ TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
+ "movq %%mm7, %%mm5 \n\t"/*FIXME remove*/\
+ "movq %%mm6, %%mm7 \n\t"\
+ "movq %%mm0, %%mm6 \n\t"\
+\
+ LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)\
+\
+ HADAMARD48\
+ "movq %%mm7, 64(%1) \n\t"\
+ MMABS(%%mm0, %%mm7)\
+ MMABS(%%mm1, %%mm7)\
+ MMABS_SUM(%%mm2, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm3, %%mm7, %%mm1)\
+ MMABS_SUM(%%mm4, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm5, %%mm7, %%mm1)\
+ MMABS_SUM(%%mm6, %%mm7, %%mm0)\
+ "movq 64(%1), %%mm2 \n\t"\
+ MMABS_SUM(%%mm2, %%mm7, %%mm1)\
+ "paddusw %%mm1, %%mm0 \n\t"\
+ "movq %%mm0, 64(%1) \n\t"\
+\
+ LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)\
+ LOAD4(32, %%mm4, %%mm5, %%mm6, %%mm7)\
+\
+ HADAMARD48\
+ "movq %%mm7, (%1) \n\t"\
+ MMABS(%%mm0, %%mm7)\
+ MMABS(%%mm1, %%mm7)\
+ MMABS_SUM(%%mm2, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm3, %%mm7, %%mm1)\
+ MMABS_SUM(%%mm4, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm5, %%mm7, %%mm1)\
+ MMABS_SUM(%%mm6, %%mm7, %%mm0)\
+ "movq (%1), %%mm2 \n\t"\
+ MMABS_SUM(%%mm2, %%mm7, %%mm1)\
+ "paddusw 64(%1), %%mm0 \n\t"\
+ "paddusw %%mm1, %%mm0 \n\t"\
+\
+ HSUM(%%mm0, %%mm1, %0)\
+\
+ : "=r" (sum)\
+ : "r"(temp)\
+ );\
+ return sum&0xFFFF;\
+}\
+WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)
+
+#define HADAMARD8_DIFF_SSE2(cpu) \
+static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
+ DECLARE_ALIGNED_16(uint64_t, temp[4]);\
+ int sum;\
+\
+ assert(h==8);\
+\
+ DIFF_PIXELS_8x8(src1, src2, stride, temp[0]);\
+\
+ asm volatile(\
+ HADAMARD8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)\
+ TRANSPOSE8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7, (%1))\
+ HADAMARD8(%%xmm0, %%xmm5, %%xmm7, %%xmm3, %%xmm6, %%xmm4, %%xmm2, %%xmm1)\
+ MMABS_SUM_8x8\
+ HSUM_SSE2(%%xmm0, %%xmm1, %0)\
+ : "=r" (sum)\
+ : "r"(temp)\
+ );\
+ return sum&0xFFFF;\
+}\
+WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)
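
Both the MMX and SSE2 bodies implement the same measure: form the 8x8 difference block, apply the Hadamard transform along rows and along columns, and sum the absolute values of the transformed coefficients, a SATD-style cost. The SIMD accumulation saturates at 16 bits, hence the final sum & 0xFFFF. A hypothetical scalar reference:

#include <stdint.h>
#include <stdlib.h>

/* hypothetical scalar reference for hadamard8_diff (not bit-exact with the
 * saturating SIMD accumulation above) */
static int hadamard8_diff_ref(const uint8_t *src1, const uint8_t *src2, int stride)
{
    int d[8][8];
    int x, y, step, i, j, sum = 0;

    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            d[y][x] = src1[y * stride + x] - src2[y * stride + x];

    for (y = 0; y < 8; y++)                      /* transform each row */
        for (step = 1; step < 8; step <<= 1)
            for (i = 0; i < 8; i += 2 * step)
                for (j = i; j < i + step; j++) {
                    int a = d[y][j], b = d[y][j + step];
                    d[y][j]        = a + b;
                    d[y][j + step] = a - b;
                }

    for (x = 0; x < 8; x++)                      /* then each column */
        for (step = 1; step < 8; step <<= 1)
            for (i = 0; i < 8; i += 2 * step)
                for (j = i; j < i + step; j++) {
                    int a = d[j][x], b = d[j + step][x];
                    d[j][x]        = a + b;
                    d[j + step][x] = a - b;
                }

    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            sum += abs(d[y][x]);
    return sum;
}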
+
+#define MMABS(a,z) MMABS_MMX(a,z)
+#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
+HADAMARD8_DIFF_MMX(mmx)
+#undef MMABS
+#undef HSUM
+
+#define MMABS(a,z) MMABS_MMX2(a,z)
+#define MMABS_SUM_8x8 MMABS_SUM_8x8_SSE2
+#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
+HADAMARD8_DIFF_MMX(mmx2)
+HADAMARD8_DIFF_SSE2(sse2)
+#undef MMABS
+#undef MMABS_SUM_8x8
+#undef HSUM
+
+#ifdef HAVE_SSSE3
+#define MMABS(a,z) MMABS_SSSE3(a,z)
+#define MMABS_SUM_8x8 MMABS_SUM_8x8_NOSPILL
+HADAMARD8_DIFF_SSE2(ssse3)
+#undef MMABS
+#undef MMABS_SUM_8x8
+#endif
+
+#define DCT_SAD4(m,mm,o)\
+ "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
+ "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
+ "mov"#m" "#o"+32(%1), "#mm"4 \n\t"\
+ "mov"#m" "#o"+48(%1), "#mm"5 \n\t"\
+ MMABS_SUM(mm##2, mm##6, mm##0)\
+ MMABS_SUM(mm##3, mm##7, mm##1)\
+ MMABS_SUM(mm##4, mm##6, mm##0)\
+ MMABS_SUM(mm##5, mm##7, mm##1)\
+
+#define DCT_SAD_MMX\
+ "pxor %%mm0, %%mm0 \n\t"\
+ "pxor %%mm1, %%mm1 \n\t"\
+ DCT_SAD4(q, %%mm, 0)\
+ DCT_SAD4(q, %%mm, 8)\
+ DCT_SAD4(q, %%mm, 64)\
+ DCT_SAD4(q, %%mm, 72)\
+ "paddusw %%mm1, %%mm0 \n\t"\
+ HSUM(%%mm0, %%mm1, %0)
+
+#define DCT_SAD_SSE2\
+ "pxor %%xmm0, %%xmm0 \n\t"\
+ "pxor %%xmm1, %%xmm1 \n\t"\
+ DCT_SAD4(dqa, %%xmm, 0)\
+ DCT_SAD4(dqa, %%xmm, 64)\
+ "paddusw %%xmm1, %%xmm0 \n\t"\
+ HSUM(%%xmm0, %%xmm1, %0)
+
+#define DCT_SAD_FUNC(cpu) \
+static int sum_abs_dctelem_##cpu(DCTELEM *block){\
+ int sum;\
+ asm volatile(\
+ DCT_SAD\
+ :"=r"(sum)\
+ :"r"(block)\
+ );\
+ return sum&0xFFFF;\
+}
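
sum_abs_dctelem_* is simply the L1 norm of an 8x8 block of DCT coefficients; the SIMD versions accumulate in saturating 16-bit lanes and mask the result. Assuming DCTELEM is the usual 16-bit type, the scalar form is:

#include <stdint.h>
#include <stdlib.h>

/* hypothetical scalar reference, assuming DCTELEM == int16_t */
static int sum_abs_dctelem_ref(const int16_t *block)
{
    int i, sum = 0;
    for (i = 0; i < 64; i++)
        sum += abs(block[i]);
    return sum & 0xFFFF;   /* mirrors the SIMD return value */
}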
+
+#define DCT_SAD DCT_SAD_MMX
+#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
+#define MMABS(a,z) MMABS_MMX(a,z)
+DCT_SAD_FUNC(mmx)
+#undef MMABS
+#undef HSUM
+
+#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
+#define MMABS(a,z) MMABS_MMX2(a,z)
+DCT_SAD_FUNC(mmx2)
+#undef HSUM
+#undef DCT_SAD
+
+#define DCT_SAD DCT_SAD_SSE2
+#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)
+DCT_SAD_FUNC(sse2)
+#undef MMABS
+
+#ifdef HAVE_SSSE3
+#define MMABS(a,z) MMABS_SSSE3(a,z)
+DCT_SAD_FUNC(ssse3)
+#undef MMABS
+#endif
+#undef HSUM
+#undef DCT_SAD
+
+static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
+ int sum;
+ long i=size;
+ asm volatile(
+ "pxor %%mm4, %%mm4 \n"
+ "1: \n"
+ "sub $8, %0 \n"
+ "movq (%2,%0), %%mm2 \n"
+ "movq (%3,%0,2), %%mm0 \n"
+ "movq 8(%3,%0,2), %%mm1 \n"
+ "punpckhbw %%mm2, %%mm3 \n"
+ "punpcklbw %%mm2, %%mm2 \n"
+ "psraw $8, %%mm3 \n"
+ "psraw $8, %%mm2 \n"
+ "psubw %%mm3, %%mm1 \n"
+ "psubw %%mm2, %%mm0 \n"
+ "pmaddwd %%mm1, %%mm1 \n"
+ "pmaddwd %%mm0, %%mm0 \n"
+ "paddd %%mm1, %%mm4 \n"
+ "paddd %%mm0, %%mm4 \n"
+ "jg 1b \n"
+ "movq %%mm4, %%mm3 \n"
+ "psrlq $32, %%mm3 \n"
+ "paddd %%mm3, %%mm4 \n"
+ "movd %%mm4, %1 \n"
+ :"+r"(i), "=r"(sum)
+ :"r"(pix1), "r"(pix2)
+ );
+ return sum;
+}
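
ssd_int8_vs_int16_mmx computes the sum of squared differences between a block of signed bytes and a block of signed 16-bit values; the loop above consumes eight samples per iteration, so size is expected to be a multiple of eight. A hypothetical scalar reference:

#include <stdint.h>

/* hypothetical scalar reference for ssd_int8_vs_int16 */
static int ssd_int8_vs_int16_ref(const int8_t *pix1, const int16_t *pix2, int size)
{
    int i, sum = 0;
    for (i = 0; i < size; i++) {
        int d = pix2[i] - pix1[i];
        sum += d * d;
    }
    return sum;
}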
+
+#define PHADDD(a, t)\
+ "movq "#a", "#t" \n\t"\
+ "psrlq $32, "#a" \n\t"\
+ "paddd "#t", "#a" \n\t"
+/*
+ pmulhw: dst[0-15]=(src[0-15]*dst[0-15])[16-31]
+ pmulhrw: dst[0-15]=(src[0-15]*dst[0-15] + 0x8000)[16-31]
+ pmulhrsw: dst[0-15]=(src[0-15]*dst[0-15] + 0x4000)[15-30]
+ */
+#define PMULHRW(x, y, s, o)\
+ "pmulhw " #s ", "#x " \n\t"\
+ "pmulhw " #s ", "#y " \n\t"\
+ "paddw " #o ", "#x " \n\t"\
+ "paddw " #o ", "#y " \n\t"\
+ "psraw $1, "#x " \n\t"\
+ "psraw $1, "#y " \n\t"
+#define DEF(x) x ## _mmx
+#define SET_RND MOVQ_WONE
+#define SCALE_OFFSET 1
+
+#include "dsputil_mmx_qns.h"
+
+#undef DEF
+#undef SET_RND
+#undef SCALE_OFFSET
+#undef PMULHRW
+
+#define DEF(x) x ## _3dnow
+#define SET_RND(x)
+#define SCALE_OFFSET 0
+#define PMULHRW(x, y, s, o)\
+ "pmulhrw " #s ", "#x " \n\t"\
+ "pmulhrw " #s ", "#y " \n\t"
+
+#include "dsputil_mmx_qns.h"
+
+#undef DEF
+#undef SET_RND
+#undef SCALE_OFFSET
+#undef PMULHRW
+
+#ifdef HAVE_SSSE3
+#undef PHADDD
+#define DEF(x) x ## _ssse3
+#define SET_RND(x)
+#define SCALE_OFFSET -1
+#define PHADDD(a, t)\
+ "pshufw $0x0E, "#a", "#t" \n\t"\
+ "paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */
+#define PMULHRW(x, y, s, o)\
+ "pmulhrsw " #s ", "#x " \n\t"\
+ "pmulhrsw " #s ", "#y " \n\t"
+
+#include "dsputil_mmx_qns.h"
+
+#undef DEF
+#undef SET_RND
+#undef SCALE_OFFSET
+#undef PMULHRW
+#undef PHADDD
+#endif //HAVE_SSSE3
+
+
+/* FLAC specific */
+void ff_flac_compute_autocorr_sse2(const int32_t *data, int len, int lag,
+ double *autoc);
+
+
+void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
+{
+ if (mm_flags & MM_MMX) {
+ const int dct_algo = avctx->dct_algo;
+ if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
+ if(mm_flags & MM_SSE2){
+ c->fdct = ff_fdct_sse2;
+ }else if(mm_flags & MM_MMXEXT){
+ c->fdct = ff_fdct_mmx2;
+ }else{
+ c->fdct = ff_fdct_mmx;
+ }
+ }
+
+ c->get_pixels = get_pixels_mmx;
+ c->diff_pixels = diff_pixels_mmx;
+ c->pix_sum = pix_sum16_mmx;
+
+ c->diff_bytes= diff_bytes_mmx;
+ c->sum_abs_dctelem= sum_abs_dctelem_mmx;
+
+ c->hadamard8_diff[0]= hadamard8_diff16_mmx;
+ c->hadamard8_diff[1]= hadamard8_diff_mmx;
+
+ c->pix_norm1 = pix_norm1_mmx;
+ c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx;
+ c->sse[1] = sse8_mmx;
+ c->vsad[4]= vsad_intra16_mmx;
+
+ c->nsse[0] = nsse16_mmx;
+ c->nsse[1] = nsse8_mmx;
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->vsad[0] = vsad16_mmx;
+ }
+
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->try_8x8basis= try_8x8basis_mmx;
+ }
+ c->add_8x8basis= add_8x8basis_mmx;
+
+ c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;
+
+
+ if (mm_flags & MM_MMXEXT) {
+ c->sum_abs_dctelem= sum_abs_dctelem_mmx2;
+ c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
+ c->hadamard8_diff[1]= hadamard8_diff_mmx2;
+ c->vsad[4]= vsad_intra16_mmx2;
+
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->vsad[0] = vsad16_mmx2;
+ }
+
+ c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
+ }
+
+ if(mm_flags & MM_SSE2){
+ c->sum_abs_dctelem= sum_abs_dctelem_sse2;
+ c->hadamard8_diff[0]= hadamard8_diff16_sse2;
+ c->hadamard8_diff[1]= hadamard8_diff_sse2;
+ if (ENABLE_FLAC_ENCODER)
+ c->flac_compute_autocorr = ff_flac_compute_autocorr_sse2;
+ }
+
+#ifdef HAVE_SSSE3
+ if(mm_flags & MM_SSSE3){
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->try_8x8basis= try_8x8basis_ssse3;
+ }
+ c->add_8x8basis= add_8x8basis_ssse3;
+ c->sum_abs_dctelem= sum_abs_dctelem_ssse3;
+ c->hadamard8_diff[0]= hadamard8_diff16_ssse3;
+ c->hadamard8_diff[1]= hadamard8_diff_ssse3;
+ }
+#endif
+
+ if(mm_flags & MM_3DNOW){
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->try_8x8basis= try_8x8basis_3dnow;
+ }
+ c->add_8x8basis= add_8x8basis_3dnow;
+ }
+ }
+
+ dsputil_init_pix_mmx(c, avctx);
+}
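
dsputilenc_init_mmx() only fills in the DSPContext function-pointer table: the forward DCT is chosen from dct_algo and the available instruction sets, the comparison and pixel helpers are installed per CPU feature level, and the vsad16 / try_8x8basis variants are only overridden when CODEC_FLAG_BITEXACT is not set. A hypothetical caller sketch (not part of this commit) shows how encoder code then reaches these routines through the table, so the MMX/MMX2/SSE2/SSSE3 selection made here stays transparent to the caller:

#include "dsputil.h"

/* hypothetical usage sketch: "s" is the opaque context pointer passed to the
 * comparison functions (the MpegEncContext in practice) */
static int compare_16x16(DSPContext *c, void *s,
                         uint8_t *ref, uint8_t *cur, int stride)
{
    int satd = c->hadamard8_diff[0](s, ref, cur, stride, 16);
    int sse  = c->sse[0]          (s, ref, cur, stride, 16);
    return satd + sse;   /* purely illustrative combination of the two costs */
}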