author    Fabrice Bellard <fabrice@bellard.org>  2001-07-22 14:18:56 +0000
committer Fabrice Bellard <fabrice@bellard.org>  2001-07-22 14:18:56 +0000
commit    de6d9b6404bfd1c589799142da5a95428f146edd (patch)
tree      75ae0cbb74bdfafb6f1a40922db111a103db3bcf /libavcodec/i386
parent    1b58d58ddaf8a8c766a0353885ff504babed0453 (diff)
Initial revision
Originally committed as revision 5 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/i386')
-rw-r--r--  libavcodec/i386/cputest.c           102
-rw-r--r--  libavcodec/i386/dsputil_mmx.c      1061
-rw-r--r--  libavcodec/i386/dsputil_mmx_avg.h   352
-rw-r--r--  libavcodec/i386/fdct_mmx.s          507
-rw-r--r--  libavcodec/i386/fdctdata.c          143
-rw-r--r--  libavcodec/i386/mmx.h               536
-rw-r--r--  libavcodec/i386/sad_mmx.s           798
7 files changed, 3499 insertions, 0 deletions
diff --git a/libavcodec/i386/cputest.c b/libavcodec/i386/cputest.c
new file mode 100644
index 0000000000..9181f413d5
--- /dev/null
+++ b/libavcodec/i386/cputest.c
@@ -0,0 +1,102 @@
+/* CPU detection code, extracted from mmx.h ((c) 1997-99 by H. Dietz
+ and R. Fisher). Converted to C and improved by Gerard Lantau. */
+
+#include <stdlib.h>
+#include "../dsputil.h"
+
+#define cpuid(index,eax,ebx,ecx,edx) \
+ asm ("cpuid" \
+ : "=a" (eax), "=b" (ebx), \
+ "=c" (ecx), "=d" (edx) \
+ : "a" (index) \
+ : "cc")
+
+/* Function to test if multimedia instructions are supported... */
+int mm_support(void)
+{
+ int rval;
+ int eax, ebx, ecx, edx;
+
+
+ __asm__ __volatile__ (
+ /* See if CPUID instruction is supported ... */
+ /* ... Get copies of EFLAGS into eax and ecx */
+ "pushf\n\t"
+ "popl %0\n\t"
+ "movl %0, %1\n\t"
+
+ /* ... Toggle the ID bit in one copy and store */
+ /* to the EFLAGS reg */
+ "xorl $0x200000, %0\n\t"
+ "push %0\n\t"
+ "popf\n\t"
+
+ /* ... Get the (hopefully modified) EFLAGS */
+ "pushf\n\t"
+ "popl %0\n\t"
+ : "=a" (eax), "=c" (ecx)
+ :
+ : "cc"
+ );
+
+ if (eax == ecx)
+ return 0; /* CPUID not supported */
+
+ cpuid(0, eax, ebx, ecx, edx);
+
+ if (ebx == 0x756e6547 && /* "Genu" */
+ edx == 0x49656e69 && /* "ineI" */
+ ecx == 0x6c65746e) { /* "ntel": the vendor string "GenuineIntel" */
+
+ /* intel */
+ inteltest:
+ cpuid(1, eax, ebx, ecx, edx);
+ if ((edx & 0x00800000) == 0)
+ return 0;
+ rval = MM_MMX;
+ if (edx & 0x02000000)
+ rval |= MM_MMXEXT | MM_SSE;
+ if (edx & 0x04000000)
+ rval |= MM_SSE2;
+ return rval;
+ } else if (ebx == 0x68747541 && /* "Auth" */
+ edx == 0x69746e65 && /* "enti" */
+ ecx == 0x444d4163) { /* "cAMD": the vendor string "AuthenticAMD" */
+ /* AMD */
+ cpuid(0x80000000, eax, ebx, ecx, edx);
+ if ((unsigned)eax < 0x80000001)
+ goto inteltest;
+ cpuid(0x80000001, eax, ebx, ecx, edx);
+ if ((edx & 0x00800000) == 0) /* bit 23: MMX */
+ return 0;
+ rval = MM_MMX;
+ if (edx & 0x80000000) /* bit 31: 3DNow! */
+ rval |= MM_3DNOW;
+ if (edx & 0x00400000) /* bit 22: AMD MMX extensions */
+ rval |= MM_MMXEXT;
+ return rval;
+ } else if (ebx == 0x69727943 && /* "Cyri" */
+ edx == 0x736e4978 && /* "xIns" */
+ ecx == 0x64616574) { /* "tead": the vendor string "CyrixInstead" */
+ /* Cyrix Section */
+ /* See if extended CPUID level 80000001 is supported */
+ /* The value of CPUID/80000001 for the 6x86MX is undefined
+ according to the Cyrix CPU Detection Guide (Preliminary
+ Rev. 1.01 table 1), so we'll check the value of eax for
+ CPUID/0 to see if standard CPUID level 2 is supported.
+ According to the table, the only CPU which supports level
+ 2 is also the only one which supports extended CPUID levels.
+ */
+ if (eax != 2)
+ goto inteltest;
+ cpuid(0x80000001, eax, ebx, ecx, edx);
+ /* per the Cyrix CPU Detection Guide, the extended feature flags of
+ CPUID/80000001 are returned in edx (eax holds the CPU signature),
+ so test edx rather than eax here */
+ if ((edx & 0x00800000) == 0) /* bit 23: MMX */
+ return 0;
+ rval = MM_MMX;
+ if (edx & 0x01000000) /* bit 24: Cyrix extended MMX */
+ rval |= MM_MMXEXT;
+ return rval;
+ } else {
+ return 0;
+ }
+}
diff --git a/libavcodec/i386/dsputil_mmx.c b/libavcodec/i386/dsputil_mmx.c
new file mode 100644
index 0000000000..701b70283d
--- /dev/null
+++ b/libavcodec/i386/dsputil_mmx.c
@@ -0,0 +1,1061 @@
+/*
+ * MMX optimized DSP utils
+ * Copyright (c) 2000, 2001 Gerard Lantau.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
+ */
+
+#include "../dsputil.h"
+
+int pix_abs16x16_mmx(UINT8 *blk1, UINT8 *blk2, int lx, int h);
+int pix_abs16x16_sse(UINT8 *blk1, UINT8 *blk2, int lx, int h);
+int pix_abs16x16_x2_mmx(UINT8 *blk1, UINT8 *blk2, int lx, int h);
+int pix_abs16x16_y2_mmx(UINT8 *blk1, UINT8 *blk2, int lx, int h);
+int pix_abs16x16_xy2_mmx(UINT8 *blk1, UINT8 *blk2, int lx, int h);
+
+/* pixel operations */
+static const unsigned short mm_wone[4] __attribute__ ((aligned(8))) = { 0x1, 0x1, 0x1, 0x1 };
+static const unsigned short mm_wtwo[4] __attribute__ ((aligned(8))) = { 0x2, 0x2, 0x2, 0x2 };
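+
+/* Rounding biases for the averaging code below: mm_wone supplies the +1
+ in the two-tap rounded average (a + b + 1) >> 1, and mm_wtwo the +2 in
+ the four-tap average (a + b + c + d + 2) >> 2. */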
+
+/***********************************/
+/* 3Dnow specific */
+
+#define DEF(x) x ## _3dnow
+/* on Athlons, PAVGUSB is preferred */
+#define PAVGB "pavgusb"
+
+#include "dsputil_mmx_avg.h"
+
+#undef DEF
+#undef PAVGB
+
+/***********************************/
+/* MMX2 specific */
+
+#define DEF(x) x ## _sse
+
+/* PAVGB was introduced with the MMX2 instruction set */
+#define PAVGB "pavgb"
+
+#include "dsputil_mmx_avg.h"
+
+#undef DEF
+#undef PAVGB
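+
+/* The two inclusions above are a poor man's template: dsputil_mmx_avg.h
+ re-expands the same function bodies with a different name suffix (DEF)
+ and byte-average instruction (PAVGB), so e.g. DEF(put_pixels_x2) becomes
+ put_pixels_x2_3dnow on the first pass and put_pixels_x2_sse on the
+ second. */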
+
+/***********************************/
+/* standard MMX */
+
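+/* get_pixels: widen an 8x8 block of 8-bit pixels into 16-bit DCT
+ coefficients. A scalar sketch of what the MMX code below computes:
+
+       for (i = 0; i < 8; i++)
+           for (j = 0; j < 8; j++)
+               block[i*8 + j] = pixels[i*line_size + j];
+*/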
+static void get_pixels_mmx(DCTELEM *block, const UINT8 *pixels, int line_size)
+{
+ DCTELEM *p;
+ const UINT8 *pix;
+ int i;
+
+ /* read the pixels */
+ p = block;
+ pix = pixels;
+ __asm __volatile("pxor %%mm7, %%mm7":::"memory");
+ for(i=0;i<4;i++) {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %2, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm2, 8%0\n\t"
+ "movq %%mm1, 16%0\n\t"
+ "movq %%mm3, 24%0\n\t"
+ :"=m"(*p)
+ :"m"(*pix), "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size*2;
+ p += 16;
+ }
+ emms();
+}
+
+static void put_pixels_clamped_mmx(const DCTELEM *block, UINT8 *pixels, int line_size)
+{
+ const DCTELEM *p;
+ UINT8 *pix;
+ int i;
+
+ /* read the pixels */
+ p = block;
+ pix = pixels;
+ for(i=0;i<2;i++) {
+ __asm __volatile(
+ "movq %4, %%mm0\n\t"
+ "movq 8%4, %%mm1\n\t"
+ "movq 16%4, %%mm2\n\t"
+ "movq 24%4, %%mm3\n\t"
+ "movq 32%4, %%mm4\n\t"
+ "movq 40%4, %%mm5\n\t"
+ "movq 48%4, %%mm6\n\t"
+ "movq 56%4, %%mm7\n\t"
+ "packuswb %%mm1, %%mm0\n\t"
+ "packuswb %%mm3, %%mm2\n\t"
+ "packuswb %%mm5, %%mm4\n\t"
+ "packuswb %%mm7, %%mm6\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm2, %1\n\t"
+ "movq %%mm4, %2\n\t"
+ "movq %%mm6, %3\n\t"
+ :"=m"(*pix), "=m"(*(pix+line_size))
+ ,"=m"(*(pix+line_size*2)), "=m"(*(pix+line_size*3))
+ :"m"(*p)
+ :"memory");
+ pix += line_size*4;
+ p += 32;
+ }
+ emms();
+}
+
+static void add_pixels_clamped_mmx(const DCTELEM *block, UINT8 *pixels, int line_size)
+{
+ const DCTELEM *p;
+ UINT8 *pix;
+ int i;
+
+ /* read the pixels */
+ p = block;
+ pix = pixels;
+ __asm __volatile("pxor %%mm7, %%mm7":::"memory");
+ for(i=0;i<4;i++) {
+ __asm __volatile(
+ "movq %2, %%mm0\n\t"
+ "movq 8%2, %%mm1\n\t"
+ "movq 16%2, %%mm2\n\t"
+ "movq 24%2, %%mm3\n\t"
+ "movq %0, %%mm4\n\t"
+ "movq %1, %%mm6\n\t"
+ "movq %%mm4, %%mm5\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpckhbw %%mm7, %%mm5\n\t"
+ "paddsw %%mm4, %%mm0\n\t"
+ "paddsw %%mm5, %%mm1\n\t"
+ "movq %%mm6, %%mm5\n\t"
+ "punpcklbw %%mm7, %%mm6\n\t"
+ "punpckhbw %%mm7, %%mm5\n\t"
+ "paddsw %%mm6, %%mm2\n\t"
+ "paddsw %%mm5, %%mm3\n\t"
+ "packuswb %%mm1, %%mm0\n\t"
+ "packuswb %%mm3, %%mm2\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm2, %1\n\t"
+ :"=m"(*pix), "=m"(*(pix+line_size))
+ :"m"(*p)
+ :"memory");
+ pix += line_size*2;
+ p += 16;
+ }
+ emms();
+}
+
+static void put_pixels_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ int dh, hh;
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ hh=h>>2;
+ dh=h&3;
+ while(hh--) {
+ __asm __volatile(
+ "movq %4, %%mm0\n\t"
+ "movq %5, %%mm1\n\t"
+ "movq %6, %%mm2\n\t"
+ "movq %7, %%mm3\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm1, %1\n\t"
+ "movq %%mm2, %2\n\t"
+ "movq %%mm3, %3\n\t"
+ :"=m"(*p), "=m"(*(p+line_size)), "=m"(*(p+line_size*2)), "=m"(*(p+line_size*3))
+ :"m"(*pix), "m"(*(pix+line_size)), "m"(*(pix+line_size*2)), "m"(*(pix+line_size*3))
+ :"memory");
+ pix = pix + line_size*4;
+ p = p + line_size*4;
+ }
+ while(dh--) {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix = pix + line_size;
+ p = p + line_size;
+ }
+ emms();
+}
+
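+/* put_pixels_x2: horizontal half-pel interpolation. A scalar sketch of
+ the rounded average computed below, eight pixels per row:
+
+       for (i = 0; i < h; i++)
+           for (j = 0; j < 8; j++)
+               block[i*line_size + j] = (pixels[i*line_size + j] +
+                                         pixels[i*line_size + j + 1] + 1) >> 1;
+*/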
+static void put_pixels_x2_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm4\n\t"
+ ::"m"(mm_wone[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq 1%1, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "paddusw %%mm4, %%mm0\n\t"
+ "paddusw %%mm4, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix += line_size; p += line_size;
+ } while (--h);
+ emms();
+}
+
+static void put_pixels_y2_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm4\n\t"
+ ::"m"(mm_wone[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %2, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "paddusw %%mm4, %%mm0\n\t"
+ "paddusw %%mm4, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix),
+ "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size;
+ p += line_size;
+ } while (--h);
+ emms();
+}
+
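+/* put_pixels_xy2: half-pel interpolation in both directions. Each output
+ byte is the rounded average of a 2x2 neighbourhood, hence the four
+ accumulated terms plus mm_wtwo and the final shift by 2:
+
+       dst[j] = (src[j] + src[j+1] +
+                 src[j+line_size] + src[j+line_size+1] + 2) >> 2;
+*/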
+static void put_pixels_xy2_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm6\n\t"
+ ::"m"(mm_wtwo[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %2, %%mm1\n\t"
+ "movq 1%1, %%mm4\n\t"
+ "movq 1%2, %%mm5\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "movq %%mm4, %%mm1\n\t"
+ "movq %%mm5, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpcklbw %%mm7, %%mm5\n\t"
+ "punpckhbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm5, %%mm4\n\t"
+ "paddusw %%mm3, %%mm1\n\t"
+ "paddusw %%mm6, %%mm4\n\t"
+ "paddusw %%mm6, %%mm1\n\t"
+ "paddusw %%mm4, %%mm0\n\t"
+ "paddusw %%mm1, %%mm2\n\t"
+ "psrlw $2, %%mm0\n\t"
+ "psrlw $2, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix),
+ "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size;
+ p += line_size;
+ } while(--h);
+ emms();
+}
+
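+/* The no_rnd variants drop the rounding bias, computing the truncating
+ average (a + b) >> 1 instead; the xy2 variant keeps a +1 (mm_wone)
+ rather than the full +2. Presumably this matches the codec's
+ no-rounding prediction mode. */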
+static void put_no_rnd_pixels_x2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile("pxor %%mm7, %%mm7\n\t":::"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq 1%1, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix += line_size;
+ p += line_size;
+ } while (--h);
+ emms();
+}
+
+static void put_no_rnd_pixels_y2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile("pxor %%mm7, %%mm7\n\t":::"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %2, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix),
+ "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size;
+ p += line_size;
+ } while(--h);
+ emms();
+}
+
+static void put_no_rnd_pixels_xy2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm6\n\t"
+ ::"m"(mm_wone[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %2, %%mm1\n\t"
+ "movq 1%1, %%mm4\n\t"
+ "movq 1%2, %%mm5\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "movq %%mm4, %%mm1\n\t"
+ "movq %%mm5, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpcklbw %%mm7, %%mm5\n\t"
+ "punpckhbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm5, %%mm4\n\t"
+ "paddusw %%mm3, %%mm1\n\t"
+ "paddusw %%mm6, %%mm4\n\t"
+ "paddusw %%mm6, %%mm1\n\t"
+ "paddusw %%mm4, %%mm0\n\t"
+ "paddusw %%mm1, %%mm2\n\t"
+ "psrlw $2, %%mm0\n\t"
+ "psrlw $2, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix),
+ "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size;
+ p += line_size;
+ } while(--h);
+ emms();
+}
+
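+/* The avg_* variants additionally average the interpolated prediction
+ with the pixels already stored in block (a second rounded average),
+ as used when two predictions are combined, e.g. for bidirectional
+ motion compensation. */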
+static void avg_pixels_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm6\n\t"
+ ::"m"(mm_wone[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "paddusw %%mm6, %%mm0\n\t"
+ "paddusw %%mm6, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix += line_size;
+ p += line_size;
+ }
+ while (--h);
+ emms();
+}
+
+static void avg_pixels_x2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm6\n\t"
+ ::"m"(mm_wone[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm1\n\t"
+ "movq %0, %%mm0\n\t"
+ "movq 1%1, %%mm4\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "movq %%mm4, %%mm5\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpckhbw %%mm7, %%mm5\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "paddusw %%mm4, %%mm1\n\t"
+ "paddusw %%mm5, %%mm3\n\t"
+ "paddusw %%mm6, %%mm1\n\t"
+ "paddusw %%mm6, %%mm3\n\t"
+ "psrlw $1, %%mm1\n\t"
+ "psrlw $1, %%mm3\n\t"
+ "paddusw %%mm6, %%mm0\n\t"
+ "paddusw %%mm6, %%mm2\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix += line_size;
+ p += line_size;
+ } while (--h);
+ emms();
+}
+
+static void avg_pixels_y2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm6\n\t"
+ ::"m"(mm_wone[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm1\n\t"
+ "movq %0, %%mm0\n\t"
+ "movq %2, %%mm4\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "movq %%mm4, %%mm5\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpckhbw %%mm7, %%mm5\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "paddusw %%mm4, %%mm1\n\t"
+ "paddusw %%mm5, %%mm3\n\t"
+ "paddusw %%mm6, %%mm1\n\t"
+ "paddusw %%mm6, %%mm3\n\t"
+ "psrlw $1, %%mm1\n\t"
+ "psrlw $1, %%mm3\n\t"
+ "paddusw %%mm6, %%mm0\n\t"
+ "paddusw %%mm6, %%mm2\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix), "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size;
+ p += line_size ;
+ } while(--h);
+ emms();
+}
+
+static void avg_pixels_xy2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm6\n\t"
+ ::"m"(mm_wtwo[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %2, %%mm1\n\t"
+ "movq 1%1, %%mm4\n\t"
+ "movq 1%2, %%mm5\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "movq %%mm4, %%mm1\n\t"
+ "movq %%mm5, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpcklbw %%mm7, %%mm5\n\t"
+ "punpckhbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm5, %%mm4\n\t"
+ "paddusw %%mm3, %%mm1\n\t"
+ "paddusw %%mm6, %%mm4\n\t"
+ "paddusw %%mm6, %%mm1\n\t"
+ "paddusw %%mm4, %%mm0\n\t"
+ "paddusw %%mm1, %%mm2\n\t"
+ "movq %3, %%mm5\n\t"
+ "psrlw $2, %%mm0\n\t"
+ "movq %0, %%mm1\n\t"
+ "psrlw $2, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "paddusw %%mm5, %%mm0\n\t"
+ "paddusw %%mm5, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix),
+ "m"(*(pix+line_size)), "m"(mm_wone[0])
+ :"memory");
+ pix += line_size;
+ p += line_size ;
+ } while(--h);
+ emms();
+}
+
+static void avg_no_rnd_pixels_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile("pxor %%mm7, %%mm7\n\t":::"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix += line_size;
+ p += line_size ;
+ } while (--h);
+ emms();
+}
+
+static void avg_no_rnd_pixels_x2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t":::"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq 1%1, %%mm1\n\t"
+ "movq %0, %%mm4\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "movq %%mm4, %%mm5\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpckhbw %%mm7, %%mm5\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "paddusw %%mm4, %%mm0\n\t"
+ "paddusw %%mm5, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix += line_size;
+ p += line_size;
+ } while (--h);
+ emms();
+}
+
+static void avg_no_rnd_pixels_y2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t":::"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %2, %%mm1\n\t"
+ "movq %0, %%mm4\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "movq %%mm4, %%mm5\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpckhbw %%mm7, %%mm5\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "paddusw %%mm4, %%mm0\n\t"
+ "paddusw %%mm5, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix), "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size;
+ p += line_size ;
+ } while(--h);
+ emms();
+}
+
+static void avg_no_rnd_pixels_xy2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm6\n\t"
+ ::"m"(mm_wone[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %2, %%mm1\n\t"
+ "movq 1%1, %%mm4\n\t"
+ "movq 1%2, %%mm5\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "movq %%mm4, %%mm1\n\t"
+ "movq %%mm5, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpcklbw %%mm7, %%mm5\n\t"
+ "punpckhbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm5, %%mm4\n\t"
+ "paddusw %%mm3, %%mm1\n\t"
+ "paddusw %%mm6, %%mm4\n\t"
+ "paddusw %%mm6, %%mm1\n\t"
+ "paddusw %%mm4, %%mm0\n\t"
+ "paddusw %%mm1, %%mm2\n\t"
+ "movq %0, %%mm1\n\t"
+ "psrlw $2, %%mm0\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "psrlw $2, %%mm2\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "psrlw $1, %%mm0\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix),
+ "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size;
+ p += line_size;
+ } while(--h);
+ emms();
+}
+
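+/* The sub_* variants subtract an (optionally half-pel interpolated)
+ prediction from the 16-bit coefficients in block, leaving the residual
+ that is fed to the forward DCT. */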
+static void sub_pixels_mmx( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
+{
+ DCTELEM *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile("pxor %%mm7, %%mm7":::"memory");
+ do {
+ __asm __volatile(
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm2\n\t"
+ "movq 8%0, %%mm1\n\t"
+ "movq %%mm2, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "psubsw %%mm2, %%mm0\n\t"
+ "psubsw %%mm3, %%mm1\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm1, 8%0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix += line_size;
+ p += 8;
+ } while (--h);
+ emms();
+}
+
+static void sub_pixels_x2_mmx( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
+{
+ DCTELEM *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm6"
+ ::"m"(mm_wone[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm2\n\t"
+ "movq 8%0, %%mm1\n\t"
+ "movq 1%1, %%mm4\n\t"
+ "movq %%mm2, %%mm3\n\t"
+ "movq %%mm4, %%mm5\n\t"
+ "punpcklbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpckhbw %%mm7, %%mm5\n\t"
+ "paddusw %%mm4, %%mm2\n\t"
+ "paddusw %%mm5, %%mm3\n\t"
+ "paddusw %%mm6, %%mm2\n\t"
+ "paddusw %%mm6, %%mm3\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "psrlw $1, %%mm3\n\t"
+ "psubsw %%mm2, %%mm0\n\t"
+ "psubsw %%mm3, %%mm1\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm1, 8%0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix += line_size;
+ p += 8;
+ } while (--h);
+ emms();
+}
+
+static void sub_pixels_y2_mmx( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
+{
+ DCTELEM *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm6"
+ ::"m"(mm_wone[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm2\n\t"
+ "movq 8%0, %%mm1\n\t"
+ "movq %2, %%mm4\n\t"
+ "movq %%mm2, %%mm3\n\t"
+ "movq %%mm4, %%mm5\n\t"
+ "punpcklbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpckhbw %%mm7, %%mm5\n\t"
+ "paddusw %%mm4, %%mm2\n\t"
+ "paddusw %%mm5, %%mm3\n\t"
+ "paddusw %%mm6, %%mm2\n\t"
+ "paddusw %%mm6, %%mm3\n\t"
+ "psrlw $1, %%mm2\n\t"
+ "psrlw $1, %%mm3\n\t"
+ "psubsw %%mm2, %%mm0\n\t"
+ "psubsw %%mm3, %%mm1\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm1, 8%0\n\t"
+ :"=m"(*p)
+ :"m"(*pix), "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size;
+ p += 8;
+ } while (--h);
+ emms();
+}
+
+static void sub_pixels_xy2_mmx( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
+{
+ DCTELEM *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm6\n\t"
+ ::"m"(mm_wtwo[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %2, %%mm1\n\t"
+ "movq 1%1, %%mm4\n\t"
+ "movq 1%2, %%mm5\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "movq %%mm4, %%mm1\n\t"
+ "movq %%mm5, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpcklbw %%mm7, %%mm5\n\t"
+ "punpckhbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm5, %%mm4\n\t"
+ "paddusw %%mm3, %%mm1\n\t"
+ "paddusw %%mm6, %%mm4\n\t"
+ "paddusw %%mm6, %%mm1\n\t"
+ "paddusw %%mm4, %%mm0\n\t"
+ "paddusw %%mm1, %%mm2\n\t"
+ "movq %0, %%mm1\n\t"
+ "movq 8%0, %%mm3\n\t"
+ "psrlw $2, %%mm0\n\t"
+ "psrlw $2, %%mm2\n\t"
+ "psubsw %%mm0, %%mm1\n\t"
+ "psubsw %%mm2, %%mm3\n\t"
+ "movq %%mm1, %0\n\t"
+ "movq %%mm3, 8%0\n\t"
+ :"=m"(*p)
+ :"m"(*pix),
+ "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size;
+ p += 8 ;
+ } while(--h);
+ emms();
+}
+
+void dsputil_init_mmx(void)
+{
+ mm_flags = mm_support();
+#if 0
+ printf("CPU flags:");
+ if (mm_flags & MM_MMX)
+ printf(" mmx");
+ if (mm_flags & MM_MMXEXT)
+ printf(" mmxext");
+ if (mm_flags & MM_3DNOW)
+ printf(" 3dnow");
+ if (mm_flags & MM_SSE)
+ printf(" sse");
+ if (mm_flags & MM_SSE2)
+ printf(" sse2");
+ printf("\n");
+#endif
+
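+    /* The *_pixels_tab[0..3] slots select the pel position of the
+       prediction: 0 = full pel, 1 = horizontal half pel (x2),
+       2 = vertical half pel (y2), 3 = both (xy2). */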
+ if (mm_flags & MM_MMX) {
+ get_pixels = get_pixels_mmx;
+ put_pixels_clamped = put_pixels_clamped_mmx;
+ add_pixels_clamped = add_pixels_clamped_mmx;
+
+ pix_abs16x16 = pix_abs16x16_mmx;
+ pix_abs16x16_x2 = pix_abs16x16_x2_mmx;
+ pix_abs16x16_y2 = pix_abs16x16_y2_mmx;
+ pix_abs16x16_xy2 = pix_abs16x16_xy2_mmx;
+ av_fdct = fdct_mmx;
+
+ put_pixels_tab[0] = put_pixels_mmx;
+ put_pixels_tab[1] = put_pixels_x2_mmx;
+ put_pixels_tab[2] = put_pixels_y2_mmx;
+ put_pixels_tab[3] = put_pixels_xy2_mmx;
+
+ put_no_rnd_pixels_tab[0] = put_pixels_mmx;
+ put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx;
+ put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx;
+ put_no_rnd_pixels_tab[3] = put_no_rnd_pixels_xy2_mmx;
+
+ avg_pixels_tab[0] = avg_pixels_mmx;
+ avg_pixels_tab[1] = avg_pixels_x2_mmx;
+ avg_pixels_tab[2] = avg_pixels_y2_mmx;
+ avg_pixels_tab[3] = avg_pixels_xy2_mmx;
+
+ avg_no_rnd_pixels_tab[0] = avg_no_rnd_pixels_mmx;
+ avg_no_rnd_pixels_tab[1] = avg_no_rnd_pixels_x2_mmx;
+ avg_no_rnd_pixels_tab[2] = avg_no_rnd_pixels_y2_mmx;
+ avg_no_rnd_pixels_tab[3] = avg_no_rnd_pixels_xy2_mmx;
+
+ sub_pixels_tab[0] = sub_pixels_mmx;
+ sub_pixels_tab[1] = sub_pixels_x2_mmx;
+ sub_pixels_tab[2] = sub_pixels_y2_mmx;
+ sub_pixels_tab[3] = sub_pixels_xy2_mmx;
+
+ if (mm_flags & MM_MMXEXT) {
+ pix_abs16x16 = pix_abs16x16_sse;
+ }
+
+ if (mm_flags & MM_SSE) {
+ put_pixels_tab[1] = put_pixels_x2_sse;
+ put_pixels_tab[2] = put_pixels_y2_sse;
+
+ avg_pixels_tab[0] = avg_pixels_sse;
+ avg_pixels_tab[1] = avg_pixels_x2_sse;
+ avg_pixels_tab[2] = avg_pixels_y2_sse;
+ avg_pixels_tab[3] = avg_pixels_xy2_sse;
+
+ sub_pixels_tab[1] = sub_pixels_x2_sse;
+ sub_pixels_tab[2] = sub_pixels_y2_sse;
+ } else if (mm_flags & MM_3DNOW) {
+ put_pixels_tab[1] = put_pixels_x2_3dnow;
+ put_pixels_tab[2] = put_pixels_y2_3dnow;
+
+ avg_pixels_tab[0] = avg_pixels_3dnow;
+ avg_pixels_tab[1] = avg_pixels_x2_3dnow;
+ avg_pixels_tab[2] = avg_pixels_y2_3dnow;
+ avg_pixels_tab[3] = avg_pixels_xy2_3dnow;
+
+ sub_pixels_tab[1] = sub_pixels_x2_3dnow;
+ sub_pixels_tab[2] = sub_pixels_y2_3dnow;
+ }
+ }
+}
diff --git a/libavcodec/i386/dsputil_mmx_avg.h b/libavcodec/i386/dsputil_mmx_avg.h
new file mode 100644
index 0000000000..e47b83f62d
--- /dev/null
+++ b/libavcodec/i386/dsputil_mmx_avg.h
@@ -0,0 +1,352 @@
+/*
+ * DSP utils : average functions are compiled twice for 3dnow/mmx2
+ * Copyright (c) 2000, 2001 Gerard Lantau.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
+ */
+
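+/* PAVGB expands to either 3DNow!'s pavgusb or MMX2's pavgb; both compute
+ the byte-wise rounded average (a + b + 1) >> 1 in a single instruction,
+ replacing the unpack/add/shift sequences of the plain MMX versions. */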
+static void DEF(put_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ int dh, hh;
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ hh=h>>2;
+ dh=h&3;
+ while(hh--) {
+ __asm __volatile(
+ "movq %4, %%mm0\n\t"
+ "movq 1%4, %%mm1\n\t"
+ "movq %5, %%mm2\n\t"
+ "movq 1%5, %%mm3\n\t"
+ "movq %6, %%mm4\n\t"
+ "movq 1%6, %%mm5\n\t"
+ "movq %7, %%mm6\n\t"
+ "movq 1%7, %%mm7\n\t"
+ PAVGB" %%mm1, %%mm0\n\t"
+ PAVGB" %%mm3, %%mm2\n\t"
+ PAVGB" %%mm5, %%mm4\n\t"
+ PAVGB" %%mm7, %%mm6\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm2, %1\n\t"
+ "movq %%mm4, %2\n\t"
+ "movq %%mm6, %3\n\t"
+ :"=m"(*p), "=m"(*(p+line_size)), "=m"(*(p+line_size*2)), "=m"(*(p+line_size*3))
+ :"m"(*pix), "m"(*(pix+line_size)), "m"(*(pix+line_size*2)), "m"(*(pix+line_size*3))
+ :"memory");
+ pix += line_size*4; p += line_size*4;
+ }
+ while(dh--) {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq 1%1, %%mm1\n\t"
+ PAVGB" %%mm1, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix += line_size; p += line_size;
+ }
+ emms();
+}
+
+static void DEF(put_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ int dh, hh;
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+
+ hh=h>>1;
+ dh=h&1;
+ while(hh--) {
+ __asm __volatile(
+ "movq %2, %%mm0\n\t"
+ "movq %3, %%mm1\n\t"
+ "movq %4, %%mm2\n\t"
+ PAVGB" %%mm1, %%mm0\n\t"
+ PAVGB" %%mm2, %%mm1\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm1, %1\n\t"
+ :"=m"(*p), "=m"(*(p+line_size))
+ :"m"(*pix), "m"(*(pix+line_size)),
+ "m"(*(pix+line_size*2))
+ :"memory");
+ pix += line_size*2;
+ p += line_size*2;
+ }
+ if(dh) {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %2, %%mm1\n\t"
+ PAVGB" %%mm1, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix),
+ "m"(*(pix+line_size))
+ :"memory");
+ }
+ emms();
+}
+
+static void DEF(avg_pixels)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ int dh, hh;
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ hh=h>>2;
+ dh=h&3;
+ while(hh--) {
+ __asm __volatile(
+ "movq %0, %%mm0\n\t"
+ "movq %4, %%mm1\n\t"
+ "movq %1, %%mm2\n\t"
+ "movq %5, %%mm3\n\t"
+ "movq %2, %%mm4\n\t"
+ "movq %6, %%mm5\n\t"
+ "movq %3, %%mm6\n\t"
+ "movq %7, %%mm7\n\t"
+ PAVGB" %%mm1, %%mm0\n\t"
+ PAVGB" %%mm3, %%mm2\n\t"
+ PAVGB" %%mm5, %%mm4\n\t"
+ PAVGB" %%mm7, %%mm6\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm2, %1\n\t"
+ "movq %%mm4, %2\n\t"
+ "movq %%mm6, %3\n\t"
+ :"=m"(*p), "=m"(*(p+line_size)), "=m"(*(p+line_size*2)), "=m"(*(p+line_size*3))
+ :"m"(*pix), "m"(*(pix+line_size)), "m"(*(pix+line_size*2)), "m"(*(pix+line_size*3))
+ :"memory");
+ pix += line_size*4; p += line_size*4;
+ }
+ while(dh--) {
+ __asm __volatile(
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ PAVGB" %%mm1, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix += line_size; p += line_size;
+ }
+ emms();
+}
+
+static void DEF(avg_pixels_x2)( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ int dh, hh;
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ hh=h>>1;
+ dh=h&1;
+ while(hh--) {
+ __asm __volatile(
+ "movq %2, %%mm2\n\t"
+ "movq 1%2, %%mm3\n\t"
+ "movq %3, %%mm4\n\t"
+ "movq 1%3, %%mm5\n\t"
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ PAVGB" %%mm3, %%mm2\n\t"
+ PAVGB" %%mm2, %%mm0\n\t"
+ PAVGB" %%mm5, %%mm4\n\t"
+ PAVGB" %%mm4, %%mm1\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm1, %1\n\t"
+ :"=m"(*p), "=m"(*(p+line_size))
+ :"m"(*pix), "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size*2;
+ p += line_size*2;
+ }
+ if(dh) {
+ __asm __volatile(
+ "movq %1, %%mm1\n\t"
+ "movq 1%1, %%mm2\n\t"
+ "movq %0, %%mm0\n\t"
+ PAVGB" %%mm2, %%mm1\n\t"
+ PAVGB" %%mm1, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ }
+ emms();
+}
+
+static void DEF(avg_pixels_y2)( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ int dh, hh;
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ hh=h>>1;
+ dh=h&1;
+ while(hh--) {
+ __asm __volatile(
+ "movq %2, %%mm2\n\t"
+ "movq %3, %%mm3\n\t"
+ "movq %3, %%mm4\n\t"
+ "movq %4, %%mm5\n\t"
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ PAVGB" %%mm3, %%mm2\n\t"
+ PAVGB" %%mm2, %%mm0\n\t"
+ PAVGB" %%mm5, %%mm4\n\t"
+ PAVGB" %%mm4, %%mm1\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm1, %1\n\t"
+ :"=m"(*p), "=m"(*(p+line_size))
+ :"m"(*pix), "m"(*(pix+line_size)), "m"(*(pix+line_size*2))
+ :"memory");
+ pix += line_size*2;
+ p += line_size*2;
+ }
+ if(dh) {
+ __asm __volatile(
+ "movq %1, %%mm1\n\t"
+ "movq %2, %%mm2\n\t"
+ "movq %0, %%mm0\n\t"
+ PAVGB" %%mm2, %%mm1\n\t"
+ PAVGB" %%mm1, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix), "m"(*(pix+line_size))
+ :"memory");
+ }
+ emms();
+}
+
+static void DEF(avg_pixels_xy2)( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ UINT8 *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %0, %%mm6\n\t"
+ ::"m"(mm_wtwo[0]):"memory");
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0\n\t"
+ "movq %2, %%mm1\n\t"
+ "movq 1%1, %%mm4\n\t"
+ "movq 1%2, %%mm5\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm1, %%mm0\n\t"
+ "paddusw %%mm3, %%mm2\n\t"
+ "movq %%mm4, %%mm1\n\t"
+ "movq %%mm5, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm4\n\t"
+ "punpcklbw %%mm7, %%mm5\n\t"
+ "punpckhbw %%mm7, %%mm1\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "paddusw %%mm5, %%mm4\n\t"
+ "paddusw %%mm3, %%mm1\n\t"
+ "paddusw %%mm6, %%mm4\n\t"
+ "paddusw %%mm6, %%mm1\n\t"
+ "paddusw %%mm4, %%mm0\n\t"
+ "paddusw %%mm1, %%mm2\n\t"
+ "psrlw $2, %%mm0\n\t"
+ "psrlw $2, %%mm2\n\t"
+ "packuswb %%mm2, %%mm0\n\t"
+ PAVGB" %0, %%mm0\n\t"
+ "movq %%mm0, %0\n\t"
+ :"=m"(*p)
+ :"m"(*pix),
+ "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size;
+ p += line_size ;
+ } while(--h);
+ emms();
+}
+
+static void DEF(sub_pixels_x2)( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
+{
+ DCTELEM *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7":::"memory");
+ do {
+ __asm __volatile(
+ "movq 1%1, %%mm2\n\t"
+ "movq %0, %%mm0\n\t"
+ PAVGB" %1, %%mm2\n\t"
+ "movq 8%0, %%mm1\n\t"
+ "movq %%mm2, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "psubsw %%mm2, %%mm0\n\t"
+ "psubsw %%mm3, %%mm1\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm1, 8%0\n\t"
+ :"=m"(*p)
+ :"m"(*pix)
+ :"memory");
+ pix += line_size;
+ p += 8;
+ } while (--h);
+ emms();
+}
+
+static void DEF(sub_pixels_y2)( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
+{
+ DCTELEM *p;
+ const UINT8 *pix;
+ p = block;
+ pix = pixels;
+ __asm __volatile(
+ "pxor %%mm7, %%mm7":::"memory");
+ do {
+ __asm __volatile(
+ "movq %2, %%mm2\n\t"
+ "movq %0, %%mm0\n\t"
+ PAVGB" %1, %%mm2\n\t"
+ "movq 8%0, %%mm1\n\t"
+ "movq %%mm2, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "psubsw %%mm2, %%mm0\n\t"
+ "psubsw %%mm3, %%mm1\n\t"
+ "movq %%mm0, %0\n\t"
+ "movq %%mm1, 8%0\n\t"
+ :"=m"(*p)
+ :"m"(*pix), "m"(*(pix+line_size))
+ :"memory");
+ pix += line_size;
+ p += 8;
+ } while (--h);
+ emms();
+}
+
diff --git a/libavcodec/i386/fdct_mmx.s b/libavcodec/i386/fdct_mmx.s
new file mode 100644
index 0000000000..75c67bfe5e
--- /dev/null
+++ b/libavcodec/i386/fdct_mmx.s
@@ -0,0 +1,507 @@
+; //////////////////////////////////////////////////////////////////////////////
+; //
+; // fdctam32.c - AP922 MMX(3D-Now) forward-DCT
+; // ----------
+; // Intel Application Note AP-922 - fast, precise implementation of DCT
+; // http://developer.intel.com/vtune/cbts/appnotes.htm
+; // ----------
+; //
+; // This routine can use a 3D-Now/MMX enhancement to increase the
+; // accuracy of the fdct_col_4 macro. The dct_col function uses 3D-Now's
+; // PMULHRW instead of MMX's PMULHW (and POR). The substitution improves
+; // accuracy very slightly, at a small performance penalty. If the target
+; // CPU does not support 3D-Now, then this function cannot be executed.
+; //
+; // For a fast, precise MMX implementation of inverse-DCT
+; // visit http://www.elecard.com/peter
+; //
+; // v1.0 07/22/2000 (initial release)
+; //
+; // liaor@iname.com http://members.tripod.com/~liaor
+; //////////////////////////////////////////////////////////////////////////////
+
+;;;
+;;; A.Stevens Jul 2000: ported to nasm syntax and disentangled from
+;;; Win**** compiler specific stuff.
+;;; All the real work was done above though.
+;;; See above for how to optimise quality on 3DNow! CPUs
+
+ ;;
+ ;; Macros for code-readability...
+ ;;
+%define INP eax ; pointer to (short *blk)
+%define OUT ecx ; pointer to output (temporary store space qwTemp[])
+%define TABLE ebx ; pointer to tab_frw_01234567[]
+%define TABLEF ebx ; pointer to tg_all_16
+%define round_frw_row edx
+
+
+%define x0 INP + 0*16
+%define x1 INP + 1*16
+%define x2 INP + 2*16
+%define x3 INP + 3*16
+%define x4 INP + 4*16
+%define x5 INP + 5*16
+%define x6 INP + 6*16
+%define x7 INP + 7*16
+%define y0 OUT + 0*16
+%define y1 OUT + 1*16
+%define y2 OUT + 2*16
+%define y3 OUT + 3*16
+%define y4 OUT + 4*16
+%define y5 OUT + 5*16
+%define y6 OUT + 6*16
+%define y7 OUT + 7*16
+
+ ;;
+ ;; Constants for DCT
+ ;;
+%define BITS_FRW_ACC 3 ; 2 or 3 for accuracy
+%define SHIFT_FRW_COL BITS_FRW_ACC
+%define SHIFT_FRW_ROW (BITS_FRW_ACC + 17)
+%define RND_FRW_ROW (1 << (SHIFT_FRW_ROW-1))
+%define RND_FRW_COL (1 << (SHIFT_FRW_COL-1))
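+
+; With BITS_FRW_ACC = 3 these evaluate to SHIFT_FRW_COL = 3,
+; SHIFT_FRW_ROW = 20 and RND_FRW_ROW = 1 << 19 = 524288: the column pass
+; keeps 3 extra fractional bits of precision, which the row pass shifts
+; back out (together with the coefficient scaling) after its 32-bit
+; multiply-accumulates.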
+
+extern fdct_one_corr
+extern fdct_r_row ; Defined in C for convenience
+ ;;
+ ;; Concatenated table of forward dct transformation coeffs.
+ ;;
+extern fdct_tg_all_16 ; Defined in C for convenience
+ ;; Offsets into table..
+
+%define tg_1_16 (TABLEF + 0)
+%define tg_2_16 (TABLEF + 8)
+%define tg_3_16 (TABLEF + 16)
+%define cos_4_16 (TABLEF + 24)
+%define ocos_4_16 (TABLEF + 32)
+
+ ;;
+ ;; Concatenated table of forward dct coefficients
+ ;;
+extern tab_frw_01234567 ; Defined in C for convenience
+
+ ;; Offsets into table..
+SECTION .text
+
+global fdct_mmx
+
+;;;
+;;; void fdct_mmx( short *blk )
+;;;
+
+
+
+; ////////////////////////////////////////////////////////////////////////
+; //
+; // The high-level pseudocode for the fdct_am32() routine :
+; //
+; // fdct_am32()
+; // {
+; // forward_dct_col03(); // dct_column transform on cols 0-3
+; // forward_dct_col47(); // dct_column transform on cols 4-7
+; // for ( j = 0; j < 8; j=j+1 )
+; // forward_dct_row1(j); // dct_row transform on row #j
+; // }
+; //
+;
+
+align 32
+fdct_mmx:
+ push ebp ; save frame pointer
+ mov ebp, esp ; link
+
+ push ebx
+ push ecx
+ push edx
+ push edi
+
+ mov INP, [ebp+8]; ; input data is row 0 of blk[]
+ ;// transform the left half of the matrix (4 columns)
+
+ lea TABLEF, [fdct_tg_all_16];
+ mov OUT, INP;
+
+; lea round_frw_col, [r_frw_col]
+ ; for ( i = 0; i < 2; i = i + 1)
+ ; the for-loop is executed twice. We are better off unrolling the
+ ; loop to avoid branch misprediction.
+.mmx32_fdct_col03:
+ movq mm0, [x1] ; 0 ; x1
+ ;;
+
+ movq mm1, [x6] ; 1 ; x6
+ movq mm2, mm0 ; 2 ; x1
+
+ movq mm3, [x2] ; 3 ; x2
+ paddsw mm0, mm1 ; t1 = x[1] + x[6]
+
+ movq mm4, [x5] ; 4 ; x5
+ psllw mm0, SHIFT_FRW_COL ; t1
+
+ movq mm5, [x0] ; 5 ; x0
+ paddsw mm4, mm3 ; t2 = x[2] + x[5]
+
+ paddsw mm5, [x7] ; t0 = x[0] + x[7]
+ psllw mm4, SHIFT_FRW_COL ; t2
+
+ movq mm6, mm0 ; 6 ; t1
+ psubsw mm2, mm1 ; 1 ; t6 = x[1] - x[6]
+
+ movq mm1, [tg_2_16] ; 1 ; tg_2_16
+ psubsw mm0, mm4 ; tm12 = t1 - t2
+
+ movq mm7, [x3] ; 7 ; x3
+ pmulhw mm1, mm0 ; tm12*tg_2_16
+
+ paddsw mm7, [x4] ; t3 = x[3] + x[4]
+ psllw mm5, SHIFT_FRW_COL ; t0
+
+ paddsw mm6, mm4 ; 4 ; tp12 = t1 + t2
+ psllw mm7, SHIFT_FRW_COL ; t3
+
+ movq mm4, mm5 ; 4 ; t0
+ psubsw mm5, mm7 ; tm03 = t0 - t3
+
+ paddsw mm1, mm5 ; y2 = tm03 + tm12*tg_2_16
+ paddsw mm4, mm7 ; 7 ; tp03 = t0 + t3
+
+ por mm1, [fdct_one_corr] ; correction y2 +0.5
+ psllw mm2, SHIFT_FRW_COL+1 ; t6
+
+ pmulhw mm5, [tg_2_16] ; tm03*tg_2_16
+ movq mm7, mm4 ; 7 ; tp03
+
+ psubsw mm3, [x5] ; t5 = x[2] - x[5]
+ psubsw mm4, mm6 ; y4 = tp03 - tp12
+
+ movq [y2], mm1 ; 1 ; save y2
+ paddsw mm7, mm6 ; 6 ; y0 = tp03 + tp12
+
+ movq mm1, [x3] ; 1 ; x3
+ psllw mm3, SHIFT_FRW_COL+1 ; t5
+
+ psubsw mm1, [x4] ; t4 = x[3] - x[4]
+ movq mm6, mm2 ; 6 ; t6
+
+ movq [y4], mm4 ; 4 ; save y4
+ paddsw mm2, mm3 ; t6 + t5
+
+ pmulhw mm2, [ocos_4_16] ; tp65 = (t6 + t5)*cos_4_16
+ psubsw mm6, mm3 ; 3 ; t6 - t5
+
+ pmulhw mm6, [ocos_4_16] ; tm65 = (t6 - t5)*cos_4_16
+ psubsw mm5, mm0 ; 0 ; y6 = tm03*tg_2_16 - tm12
+
+ por mm5, [fdct_one_corr] ; correction y6 +0.5
+ psllw mm1, SHIFT_FRW_COL ; t4
+
+ por mm2, [fdct_one_corr] ; correction tp65 +0.5
+ movq mm4, mm1 ; 4 ; t4
+
+ movq mm3, [x0] ; 3 ; x0
+ paddsw mm1, mm6 ; tp465 = t4 + tm65
+
+ psubsw mm3, [x7] ; t7 = x[0] - x[7]
+ psubsw mm4, mm6 ; 6 ; tm465 = t4 - tm65
+
+ movq mm0, [tg_1_16] ; 0 ; tg_1_16
+ psllw mm3, SHIFT_FRW_COL ; t7
+
+ movq mm6, [tg_3_16] ; 6 ; tg_3_16
+ pmulhw mm0, mm1 ; tp465*tg_1_16
+
+ movq [y0], mm7 ; 7 ; save y0
+ pmulhw mm6, mm4 ; tm465*tg_3_16
+
+ movq [y6], mm5 ; 5 ; save y6
+ movq mm7, mm3 ; 7 ; t7
+
+ movq mm5, [tg_3_16] ; 5 ; tg_3_16
+ psubsw mm7, mm2 ; tm765 = t7 - tp65
+
+ paddsw mm3, mm2 ; 2 ; tp765 = t7 + tp65
+ pmulhw mm5, mm7 ; tm765*tg_3_16
+
+ paddsw mm0, mm3 ; y1 = tp765 + tp465*tg_1_16
+ paddsw mm6, mm4 ; tm465*tg_3_16
+
+ pmulhw mm3, [tg_1_16] ; tp765*tg_1_16
+ ;;
+
+ por mm0, [fdct_one_corr] ; correction y1 +0.5
+ paddsw mm5, mm7 ; tm765*tg_3_16
+
+ psubsw mm7, mm6 ; 6 ; y3 = tm765 - tm465*tg_3_16
+ add INP, 0x08 ; ; increment pointer
+
+ movq [y1], mm0 ; 0 ; save y1
+ paddsw mm5, mm4 ; 4 ; y5 = tm765*tg_3_16 + tm465
+
+ movq [y3], mm7 ; 7 ; save y3
+ psubsw mm3, mm1 ; 1 ; y7 = tp765*tg_1_16 - tp465
+
+ movq [y5], mm5 ; 5 ; save y5
+
+
+.mmx32_fdct_col47: ; begin processing last four columns
+ movq mm0, [x1] ; 0 ; x1
+ ;;
+ movq [y7], mm3 ; 3 ; save y7 (columns 0-4)
+ ;;
+
+ movq mm1, [x6] ; 1 ; x6
+ movq mm2, mm0 ; 2 ; x1
+
+ movq mm3, [x2] ; 3 ; x2
+ paddsw mm0, mm1 ; t1 = x[1] + x[6]
+
+ movq mm4, [x5] ; 4 ; x5
+ psllw mm0, SHIFT_FRW_COL ; t1
+
+ movq mm5, [x0] ; 5 ; x0
+ paddsw mm4, mm3 ; t2 = x[2] + x[5]
+
+ paddsw mm5, [x7] ; t0 = x[0] + x[7]
+ psllw mm4, SHIFT_FRW_COL ; t2
+
+ movq mm6, mm0 ; 6 ; t1
+ psubsw mm2, mm1 ; 1 ; t6 = x[1] - x[6]
+
+ movq mm1, [tg_2_16] ; 1 ; tg_2_16
+ psubsw mm0, mm4 ; tm12 = t1 - t2
+
+ movq mm7, [x3] ; 7 ; x3
+ pmulhw mm1, mm0 ; tm12*tg_2_16
+
+ paddsw mm7, [x4] ; t3 = x[3] + x[4]
+ psllw mm5, SHIFT_FRW_COL ; t0
+
+ paddsw mm6, mm4 ; 4 ; tp12 = t1 + t2
+ psllw mm7, SHIFT_FRW_COL ; t3
+
+ movq mm4, mm5 ; 4 ; t0
+ psubsw mm5, mm7 ; tm03 = t0 - t3
+
+ paddsw mm1, mm5 ; y2 = tm03 + tm12*tg_2_16
+ paddsw mm4, mm7 ; 7 ; tp03 = t0 + t3
+
+ por mm1, [fdct_one_corr] ; correction y2 +0.5
+ psllw mm2, SHIFT_FRW_COL+1 ; t6
+
+ pmulhw mm5, [tg_2_16] ; tm03*tg_2_16
+ movq mm7, mm4 ; 7 ; tp03
+
+ psubsw mm3, [x5] ; t5 = x[2] - x[5]
+ psubsw mm4, mm6 ; y4 = tp03 - tp12
+
+ movq [y2+8], mm1 ; 1 ; save y2
+ paddsw mm7, mm6 ; 6 ; y0 = tp03 + tp12
+
+ movq mm1, [x3] ; 1 ; x3
+ psllw mm3, SHIFT_FRW_COL+1 ; t5
+
+ psubsw mm1, [x4] ; t4 = x[3] - x[4]
+ movq mm6, mm2 ; 6 ; t6
+
+ movq [y4+8], mm4 ; 4 ; save y4
+ paddsw mm2, mm3 ; t6 + t5
+
+ pmulhw mm2, [ocos_4_16] ; tp65 = (t6 + t5)*cos_4_16
+ psubsw mm6, mm3 ; 3 ; t6 - t5
+
+ pmulhw mm6, [ocos_4_16] ; tm65 = (t6 - t5)*cos_4_16
+ psubsw mm5, mm0 ; 0 ; y6 = tm03*tg_2_16 - tm12
+
+ por mm5, [fdct_one_corr] ; correction y6 +0.5
+ psllw mm1, SHIFT_FRW_COL ; t4
+
+ por mm2, [fdct_one_corr] ; correction tp65 +0.5
+ movq mm4, mm1 ; 4 ; t4
+
+ movq mm3, [x0] ; 3 ; x0
+ paddsw mm1, mm6 ; tp465 = t4 + tm65
+
+ psubsw mm3, [x7] ; t7 = x[0] - x[7]
+ psubsw mm4, mm6 ; 6 ; tm465 = t4 - tm65
+
+ movq mm0, [tg_1_16] ; 0 ; tg_1_16
+ psllw mm3, SHIFT_FRW_COL ; t7
+
+ movq mm6, [tg_3_16] ; 6 ; tg_3_16
+ pmulhw mm0, mm1 ; tp465*tg_1_16
+
+ movq [y0+8], mm7 ; 7 ; save y0
+ pmulhw mm6, mm4 ; tm465*tg_3_16
+
+ movq [y6+8], mm5 ; 5 ; save y6
+ movq mm7, mm3 ; 7 ; t7
+
+ movq mm5, [tg_3_16] ; 5 ; tg_3_16
+ psubsw mm7, mm2 ; tm765 = t7 - tp65
+
+ paddsw mm3, mm2 ; 2 ; tp765 = t7 + tp65
+ pmulhw mm5, mm7 ; tm765*tg_3_16
+
+ paddsw mm0, mm3 ; y1 = tp765 + tp465*tg_1_16
+ paddsw mm6, mm4 ; tm465*tg_3_16
+
+ pmulhw mm3, [tg_1_16] ; tp765*tg_1_16
+ ;;
+
+ por mm0, [fdct_one_corr] ; correction y1 +0.5
+ paddsw mm5, mm7 ; tm765*tg_3_16
+
+ psubsw mm7, mm6 ; 6 ; y3 = tm765 - tm465*tg_3_16
+ ;;
+
+ movq [y1+8], mm0 ; 0 ; save y1
+ paddsw mm5, mm4 ; 4 ; y5 = tm765*tg_3_16 + tm465
+
+ movq [y3+8], mm7 ; 7 ; save y3
+ psubsw mm3, mm1 ; 1 ; y7 = tp765*tg_1_16 - tp465
+
+ movq [y5+8], mm5 ; 5 ; save y5
+
+ movq [y7+8], mm3 ; 3 ; save y7
+
+; emms;
+; } ; end of forward_dct_col07()
+ ; done with the dct_col transforms
+
+
+ ; fdct_mmx32_cols() --
+ ; the following subroutine repeats the row-transform operation,
+ ; except with different shift&round constants. This version
+ ; does NOT transpose the output again. Thus the final output
+ ; is transposed with respect to the source.
+ ;
+ ; The output is stored into blk[], which destroys the original
+ ; input data.
+ mov INP, [ebp+8]; ;; row 0
+ mov edi, 0x08; ;x = 8
+
+ lea TABLE, [tab_frw_01234567]; ; row 0
+ mov OUT, INP;
+
+ lea round_frw_row, [fdct_r_row];
+ ; for ( x = 8; x > 0; --x ) ; transform one row per iteration
+
+; ---------- loop begin
+ .lp_mmx_fdct_row1:
+ movd mm5, [INP+12]; ; mm5 = 7 6
+
+ punpcklwd mm5, [INP+8] ; mm5 = 5 7 4 6
+
+ movq mm2, mm5; ; mm2 = 5 7 4 6
+ psrlq mm5, 32; ; mm5 = _ _ 5 7
+
+ movq mm0, [INP]; ; mm0 = 3 2 1 0
+ punpcklwd mm5, mm2;; mm5 = 4 5 6 7
+
+ movq mm1, mm0; ; mm1 = 3 2 1 0
+ paddsw mm0, mm5; ; mm0 = [3+4, 2+5, 1+6, 0+7] (xt3, xt2, xt1, xt0)
+
+ psubsw mm1, mm5; ; mm1 = [3-4, 2-5, 1-6, 0-7] (xt7, xt6, xt5, xt4)
+ movq mm2, mm0; ; mm2 = [ xt3 xt2 xt1 xt0 ]
+
+ ;movq [ xt3xt2xt1xt0 ], mm0;
+ ;movq [ xt7xt6xt5xt4 ], mm1;
+
+ punpcklwd mm0, mm1;; mm0 = [ xt5 xt1 xt4 xt0 ]
+
+ punpckhwd mm2, mm1;; mm2 = [ xt7 xt3 xt6 xt2 ]
+ movq mm1, mm2; ; mm1
+
+ ;; shuffle bytes around
+
+; movq mm0, [INP] ; 0 ; x3 x2 x1 x0
+
+; movq mm1, [INP+8] ; 1 ; x7 x6 x5 x4
+ movq mm2, mm0 ; 2 ; x3 x2 x1 x0
+
+ movq mm3, [TABLE] ; 3 ; w06 w04 w02 w00
+ punpcklwd mm0, mm1 ; x5 x1 x4 x0
+
+ movq mm5, mm0 ; 5 ; x5 x1 x4 x0
+ punpckldq mm0, mm0 ; x4 x0 x4 x0 [ xt2 xt0 xt2 xt0 ]
+
+ movq mm4, [TABLE+8] ; 4 ; w07 w05 w03 w01
+ punpckhwd mm2, mm1 ; 1 ; x7 x3 x6 x2
+
+ pmaddwd mm3, mm0 ; x4*w06+x0*w04 x4*w02+x0*w00
+ movq mm6, mm2 ; 6 ; x7 x3 x6 x2
+
+ movq mm1, [TABLE+32] ; 1 ; w22 w20 w18 w16
+ punpckldq mm2, mm2 ; x6 x2 x6 x2 [ xt3 xt1 xt3 xt1 ]
+
+ pmaddwd mm4, mm2 ; x6*w07+x2*w05 x6*w03+x2*w01
+ punpckhdq mm5, mm5 ; x5 x1 x5 x1 [ xt6 xt4 xt6 xt4 ]
+
+ pmaddwd mm0, [TABLE+16] ; x4*w14+x0*w12 x4*w10+x0*w08
+ punpckhdq mm6, mm6 ; x7 x3 x7 x3 [ xt7 xt5 xt7 xt5 ]
+
+ movq mm7, [TABLE+40] ; 7 ; w23 w21 w19 w17
+ pmaddwd mm1, mm5 ; x5*w22+x1*w20 x5*w18+x1*w16
+;mm3 = a1, a0 (y2,y0)
+;mm1 = b1, b0 (y3,y1)
+;mm0 = a3,a2 (y6,y4)
+;mm5 = b3,b2 (y7,y5)
+
+ paddd mm3, [round_frw_row] ; +rounder (y2,y0)
+ pmaddwd mm7, mm6 ; x7*w23+x3*w21 x7*w19+x3*w17
+
+ pmaddwd mm2, [TABLE+24] ; x6*w15+x2*w13 x6*w11+x2*w09
+ paddd mm3, mm4 ; 4 ; a1=sum(even1) a0=sum(even0) ; now ( y2, y0)
+
+ pmaddwd mm5, [TABLE+48] ; x5*w30+x1*w28 x5*w26+x1*w24
+ ;;
+
+ pmaddwd mm6, [TABLE+56] ; x7*w31+x3*w29 x7*w27+x3*w25
+ paddd mm1, mm7 ; 7 ; b1=sum(odd1) b0=sum(odd0) ; now ( y3, y1)
+
+ paddd mm0, [round_frw_row] ; +rounder (y6,y4)
+ psrad mm3, SHIFT_FRW_ROW ; (y2, y0)
+
+ paddd mm1, [round_frw_row] ; +rounder (y3,y1)
+ paddd mm0, mm2 ; 2 ; a3=sum(even3) a2=sum(even2) ; now (y6, y4)
+
+ paddd mm5, [round_frw_row] ; +rounder (y7,y5)
+ psrad mm1, SHIFT_FRW_ROW ; (y3, y1)
+
+ paddd mm5, mm6 ; 6 ; b3=sum(odd3) b2=sum(odd2) ; now ( y7, y5)
+ psrad mm0, SHIFT_FRW_ROW ; (y6, y4)
+
+ add OUT, 16; ; increment row-output address by 1 row
+ psrad mm5, SHIFT_FRW_ROW ; (y7, y5)
+
+ add INP, 16; ; increment row-address by 1 row
+ packssdw mm3, mm0 ; 0 ; y6 y4 y2 y0
+
+ packssdw mm1, mm5 ; 3 ; y7 y5 y3 y1
+ movq mm6, mm3; ; mm0 = y6 y4 y2 y0
+
+ punpcklwd mm3, mm1; ; y3 y2 y1 y0
+ sub edi, 0x01; ; i = i - 1
+
+ punpckhwd mm6, mm1; ; y7 y6 y5 y4
+ add TABLE,64; ; increment to next table
+
+ movq [OUT-16], mm3 ; 1 ; save y3 y2 y1 y0
+
+ movq [OUT-8], mm6 ; 7 ; save y7 y6 y5 y4
+
+ cmp edi, 0x00;
+ jg near .lp_mmx_fdct_row1; ; begin fdct processing on next row
+ ;;
+ ;; Tidy up and return
+ ;;
+ pop edi
+ pop edx
+ pop ecx
+ pop ebx
+
+ pop ebp ; restore frame pointer
+ emms
+ ret
+ \ No newline at end of file
diff --git a/libavcodec/i386/fdctdata.c b/libavcodec/i386/fdctdata.c
new file mode 100644
index 0000000000..e095d0f4e4
--- /dev/null
+++ b/libavcodec/i386/fdctdata.c
@@ -0,0 +1,143 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// fdctam32.c - AP922 MMX(3D-Now) forward-DCT
+// ----------
+// Intel Application Note AP-922 - fast, precise implementation of DCT
+// http://developer.intel.com/vtune/cbts/appnotes.htm
+// ----------
+//
+// This routine uses a 3D-Now/MMX enhancement to increase the
+// accuracy of the fdct_col_4 macro. The dct_col function uses 3D-Now's
+// PMULHRW instead of MMX's PMULHW (and POR). The substitution improves
+// accuracy very slightly, at a small performance penalty. If the target
+// CPU does not support 3D-Now, then this function cannot be executed.
+// fdctmm32.c contains the standard MMX implementation of AP-922.
+//
+// For a fast, precise MMX implementation of inverse-DCT
+// visit http://www.elecard.com/peter
+//
+// v1.0 07/22/2000 (initial release)
+// Initial release of AP922 MMX(3D-Now) forward_DCT.
+// This code was tested with Visual C++ 6.0Pro + service_pack4 +
+// processor_pack_beta! If you have the processor_pack_beta, you can
+// remove the #include for amd3dx.h, and substitute the 'normal'
+// assembly lines for the macro'd versions. Otherwise, this
+// code should compile 'as is', under Visual C++ 6.0 Pro.
+//
+// liaor@iname.com http://members.tripod.com/~liaor
+//////////////////////////////////////////////////////////////////////////////
+
+#include <inttypes.h>
+
+//////////////////////////////////////////////////////////////////////
+//
+// constants for the forward DCT
+// -----------------------------
+//
+// Be sure to check that your compiler is aligning all constants to QWORD
+// (8-byte) memory boundaries! Otherwise the unaligned memory access will
+// severely stall MMX execution.
+//
+//////////////////////////////////////////////////////////////////////
+
+#define BITS_FRW_ACC 3 //; 2 or 3 for accuracy
+#define SHIFT_FRW_COL BITS_FRW_ACC
+#define SHIFT_FRW_ROW (BITS_FRW_ACC + 17)
+//#define RND_FRW_ROW (262144 * (BITS_FRW_ACC - 1)) //; 1 << (SHIFT_FRW_ROW-1)
+#define RND_FRW_ROW (1 << (SHIFT_FRW_ROW-1))
+//#define RND_FRW_COL (2 * (BITS_FRW_ACC - 1)) //; 1 << (SHIFT_FRW_COL-1)
+#define RND_FRW_COL (1 << (SHIFT_FRW_COL-1))
+
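+// Derivation note (added for clarity, not in the original AP-922 text):
+// 13036 ~= tan(pi/16) * 65536 and 27146 ~= tan(2*pi/16) * 65536; the
+// tg_3_16 and cos_4_16 rows store their value minus one, since
+// tan(3*pi/16) * 65536 and cos(pi/4) * 65536 would overflow a signed
+// 16-bit word.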
+//concatenated table, for forward DCT transformation
+const int16_t fdct_tg_all_16[] = {
+ 13036, 13036, 13036, 13036, // tan(pi/16) * 2^16
+ 27146, 27146, 27146, 27146, // tan(2*pi/16) * 2^16
+ -21746, -21746, -21746, -21746, // (tan(3*pi/16) - 1) * 2^16
+ -19195, -19195, -19195, -19195, // (cos(4*pi/16) - 1) * 2^16
+ 23170, 23170, 23170, 23170 }; // cos(4*pi/16) * 2^15
+const long long fdct_one_corr = 0x0001000100010001LL;
+const long fdct_r_row[2] = {RND_FRW_ROW, RND_FRW_ROW };
+
+const int16_t tab_frw_01234567[] = { // forward_dct coeff table
+ //row0
+ 16384, 16384, 21407, -8867, // w09 w01 w08 w00
+ 16384, 16384, 8867, -21407, // w13 w05 w12 w04
+ 16384, -16384, 8867, 21407, // w11 w03 w10 w02
+ -16384, 16384, -21407, -8867, // w15 w07 w14 w06
+ 22725, 12873, 19266, -22725, // w22 w20 w18 w16
+ 19266, 4520, -4520, -12873, // w23 w21 w19 w17
+ 12873, 4520, 4520, 19266, // w30 w28 w26 w24
+ -22725, 19266, -12873, -22725, // w31 w29 w27 w25
+
+ //row1
+ 22725, 22725, 29692, -12299, // w09 w01 w08 w00
+ 22725, 22725, 12299, -29692, // w13 w05 w12 w04
+ 22725, -22725, 12299, 29692, // w11 w03 w10 w02
+ -22725, 22725, -29692, -12299, // w15 w07 w14 w06
+ 31521, 17855, 26722, -31521, // w22 w20 w18 w16
+ 26722, 6270, -6270, -17855, // w23 w21 w19 w17
+ 17855, 6270, 6270, 26722, // w30 w28 w26 w24
+ -31521, 26722, -17855, -31521, // w31 w29 w27 w25
+
+ //row2
+ 21407, 21407, 27969, -11585, // w09 w01 w08 w00
+ 21407, 21407, 11585, -27969, // w13 w05 w12 w04
+ 21407, -21407, 11585, 27969, // w11 w03 w10 w02
+ -21407, 21407, -27969, -11585, // w15 w07 w14 w06
+ 29692, 16819, 25172, -29692, // w22 w20 w18 w16
+ 25172, 5906, -5906, -16819, // w23 w21 w19 w17
+ 16819, 5906, 5906, 25172, // w30 w28 w26 w24
+ -29692, 25172, -16819, -29692, // w31 w29 w27 w25
+
+ //row3
+ 19266, 19266, 25172, -10426, // w09 w01 w08 w00
+ 19266, 19266, 10426, -25172, // w13 w05 w12 w04
+ 19266, -19266, 10426, 25172, // w11 w03 w10 w02
+ -19266, 19266, -25172, -10426, // w15 w07 w14 w06,
+ 26722, 15137, 22654, -26722, // w22 w20 w18 w16
+ 22654, 5315, -5315, -15137, // w23 w21 w19 w17
+ 15137, 5315, 5315, 22654, // w30 w28 w26 w24
+ -26722, 22654, -15137, -26722, // w31 w29 w27 w25,
+
+ //row4
+ 16384, 16384, 21407, -8867, // w09 w01 w08 w00
+ 16384, 16384, 8867, -21407, // w13 w05 w12 w04
+ 16384, -16384, 8867, 21407, // w11 w03 w10 w02
+ -16384, 16384, -21407, -8867, // w15 w07 w14 w06
+ 22725, 12873, 19266, -22725, // w22 w20 w18 w16
+ 19266, 4520, -4520, -12873, // w23 w21 w19 w17
+ 12873, 4520, 4520, 19266, // w30 w28 w26 w24
+ -22725, 19266, -12873, -22725, // w31 w29 w27 w25
+
+ //row5
+ 19266, 19266, 25172, -10426, // w09 w01 w08 w00
+ 19266, 19266, 10426, -25172, // w13 w05 w12 w04
+ 19266, -19266, 10426, 25172, // w11 w03 w10 w02
+ -19266, 19266, -25172, -10426, // w15 w07 w14 w06
+ 26722, 15137, 22654, -26722, // w22 w20 w18 w16
+ 22654, 5315, -5315, -15137, // w23 w21 w19 w17
+ 15137, 5315, 5315, 22654, // w30 w28 w26 w24
+ -26722, 22654, -15137, -26722, // w31 w29 w27 w25
+
+ //row6
+ 21407, 21407, 27969, -11585, // w09 w01 w08 w00
+ 21407, 21407, 11585, -27969, // w13 w05 w12 w04
+ 21407, -21407, 11585, 27969, // w11 w03 w10 w02
+ -21407, 21407, -27969, -11585, // w15 w07 w14 w06,
+ 29692, 16819, 25172, -29692, // w22 w20 w18 w16
+ 25172, 5906, -5906, -16819, // w23 w21 w19 w17
+ 16819, 5906, 5906, 25172, // w30 w28 w26 w24
+ -29692, 25172, -16819, -29692, // w31 w29 w27 w25,
+
+ //row7
+ 22725, 22725, 29692, -12299, // w09 w01 w08 w00
+ 22725, 22725, 12299, -29692, // w13 w05 w12 w04
+ 22725, -22725, 12299, 29692, // w11 w03 w10 w02
+ -22725, 22725, -29692, -12299, // w15 w07 w14 w06,
+ 31521, 17855, 26722, -31521, // w22 w20 w18 w16
+ 26722, 6270, -6270, -17855, // w23 w21 w19 w17
+ 17855, 6270, 6270, 26722, // w30 w28 w26 w24
+ -31521, 26722, -17855, -31521 // w31 w29 w27 w25
+};
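+
+/* The rows above are stored as interleaved coefficient pairs
+   (w09 w01 w08 w00, ...) because the row pass consumes them with
+   pmaddwd.  A scalar model of that instruction, for reference:
+
+       void pmaddwd(const int16_t a[4], const int16_t b[4], int32_t out[2])
+       {
+           out[0] = a[0]*b[0] + a[1]*b[1];
+           out[1] = a[2]*b[2] + a[3]*b[3];
+       }
+
+   two pmaddwd results combined with paddd thus give the 4-term dot
+   products of a DCT row multiply. */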
+
+
diff --git a/libavcodec/i386/mmx.h b/libavcodec/i386/mmx.h
new file mode 100644
index 0000000000..a82c0eda32
--- /dev/null
+++ b/libavcodec/i386/mmx.h
@@ -0,0 +1,536 @@
+/* mmx.h
+
+ MultiMedia eXtensions GCC interface library for IA32.
+
+ To use this library, simply include this header file
+ and compile with GCC. You MUST have inlining enabled
+ in order for mmx_ok() to work; this can be done by
+ simply using -O on the GCC command line.
+
+ Compiling with -DMMX_TRACE will cause detailed trace
+ output to be sent to stderr for each mmx operation.
+ This adds lots of code, and obviously slows execution to
+ a crawl, but can be very useful for debugging.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY
+ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT
+ LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS FOR ANY PARTICULAR PURPOSE.
+
+ 1997-99 by H. Dietz and R. Fisher
+
+ Notes:
+ It appears that the latest gas has the pand problem fixed, therefore
+ I'll undefine BROKEN_PAND by default.
+*/
+
+#ifndef _MMX_H
+#define _MMX_H
+
+
+/* Warning: at this writing, the version of GAS packaged
+ with most Linux distributions does not handle the
+ parallel AND operation mnemonic correctly. If the
+ symbol BROKEN_PAND is defined, a slower alternative
+ coding will be used. If execution of mmxtest results
+ in an illegal instruction fault, define this symbol.
+*/
+#undef BROKEN_PAND
+
+
+/* The type of a value that fits in an MMX register
+ (note that long long constant values MUST be suffixed
+ by LL and unsigned long long values by ULL, lest
+ they be truncated by the compiler)
+*/
+typedef union {
+ long long q; /* Quadword (64-bit) value */
+ unsigned long long uq; /* Unsigned Quadword */
+ int d[2]; /* 2 Doubleword (32-bit) values */
+ unsigned int ud[2]; /* 2 Unsigned Doubleword */
+ short w[4]; /* 4 Word (16-bit) values */
+ unsigned short uw[4]; /* 4 Unsigned Word */
+ char b[8]; /* 8 Byte (8-bit) values */
+ unsigned char ub[8]; /* 8 Unsigned Byte */
+ float s[2]; /* Single-precision (32-bit) value */
+} __attribute__ ((aligned (8))) mmx_t; /* On an 8-byte (64-bit) boundary */
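+
+/* Example (illustrative, not from the original header): an 8-byte
+   constant for use with the macros below can be declared as
+
+       static mmx_t one_corr = {0x0001000100010001LL};
+
+   (a union initializer fills the first member, here the quadword q). */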
+
+
+/* Helper functions for the instruction macros that follow...
+ (note that memory-to-register, m2r, instructions are nearly
+ as efficient as register-to-register, r2r, instructions;
+ however, memory-to-memory instructions are really simulated
+ as a convenience, and are only 1/3 as efficient)
+*/
+#ifdef MMX_TRACE
+
+/* Include the stuff for printing a trace to stderr...
+*/
+
+#include <stdio.h>
+
+#define mmx_i2r(op, imm, reg) \
+ { \
+ mmx_t mmx_trace; \
+ mmx_trace.uq = (imm); \
+ fprintf(stderr, #op "_i2r(" #imm "=0x%08x%08x, ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ ("movq %%" #reg ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #reg "=0x%08x%08x) => ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ (#op " %0, %%" #reg \
+ : /* nothing */ \
+ : "X" (imm)); \
+ __asm__ __volatile__ ("movq %%" #reg ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #reg "=0x%08x%08x\n", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ }
+
+#define mmx_m2r(op, mem, reg) \
+ { \
+ mmx_t mmx_trace; \
+ mmx_trace = (mem); \
+ fprintf(stderr, #op "_m2r(" #mem "=0x%08x%08x, ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ ("movq %%" #reg ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #reg "=0x%08x%08x) => ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ (#op " %0, %%" #reg \
+ : /* nothing */ \
+ : "X" (mem)); \
+ __asm__ __volatile__ ("movq %%" #reg ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #reg "=0x%08x%08x\n", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ }
+
+#define mmx_r2m(op, reg, mem) \
+ { \
+ mmx_t mmx_trace; \
+ __asm__ __volatile__ ("movq %%" #reg ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #op "_r2m(" #reg "=0x%08x%08x, ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ mmx_trace = (mem); \
+ fprintf(stderr, #mem "=0x%08x%08x) => ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ (#op " %%" #reg ", %0" \
+ : "=X" (mem) \
+ : /* nothing */ ); \
+ mmx_trace = (mem); \
+ fprintf(stderr, #mem "=0x%08x%08x\n", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ }
+
+#define mmx_r2r(op, regs, regd) \
+ { \
+ mmx_t mmx_trace; \
+ __asm__ __volatile__ ("movq %%" #regs ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #op "_r2r(" #regs "=0x%08x%08x, ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ ("movq %%" #regd ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #regd "=0x%08x%08x) => ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ (#op " %" #regs ", %" #regd); \
+ __asm__ __volatile__ ("movq %%" #regd ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #regd "=0x%08x%08x\n", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ }
+
+#define mmx_m2m(op, mems, memd) \
+ { \
+ mmx_t mmx_trace; \
+ mmx_trace = (mems); \
+ fprintf(stderr, #op "_m2m(" #mems "=0x%08x%08x, ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ mmx_trace = (memd); \
+ fprintf(stderr, #memd "=0x%08x%08x) => ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ ("movq %0, %%mm0\n\t" \
+ #op " %1, %%mm0\n\t" \
+ "movq %%mm0, %0" \
+ : "=X" (memd) \
+ : "X" (mems)); \
+ mmx_trace = (memd); \
+ fprintf(stderr, #memd "=0x%08x%08x\n", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ }
+
+#else
+
+/* These macros are a lot simpler without the tracing...
+*/
+
+#define mmx_i2r(op, imm, reg) \
+ __asm__ __volatile__ (#op " %0, %%" #reg \
+ : /* nothing */ \
+ : "i" (imm) )
+
+#define mmx_m2r(op, mem, reg) \
+ __asm__ __volatile__ (#op " %0, %%" #reg \
+ : /* nothing */ \
+ : "m" (mem))
+
+#define mmx_r2m(op, reg, mem) \
+ __asm__ __volatile__ (#op " %%" #reg ", %0" \
+ : "=m" (mem) \
+ : /* nothing */ )
+
+#define mmx_r2r(op, regs, regd) \
+ __asm__ __volatile__ (#op " %" #regs ", %" #regd)
+
+#define mmx_m2m(op, mems, memd) \
+ __asm__ __volatile__ ("movq %0, %%mm0\n\t" \
+ #op " %1, %%mm0\n\t" \
+ "movq %%mm0, %0" \
+ : "=m" (memd) \
+ : "m" (mems))
+
+#endif
+
+
+/* 1x64 MOVe Quadword
+ (this is both a load and a store...
+ in fact, it is the only way to store)
+*/
+#define movq_m2r(var, reg) mmx_m2r(movq, var, reg)
+#define movq_r2m(reg, var) mmx_r2m(movq, reg, var)
+#define movq_r2r(regs, regd) mmx_r2r(movq, regs, regd)
+#define movq(vars, vard) \
+ __asm__ __volatile__ ("movq %1, %%mm0\n\t" \
+ "movq %%mm0, %0" \
+ : "=X" (vard) \
+ : "X" (vars))
+
+
+/* 1x32 MOVe Doubleword
+ (like movq, this is both load and store...
+ but is most useful for moving things between
+ mmx registers and ordinary registers)
+*/
+#define movd_m2r(var, reg) mmx_m2r(movd, var, reg)
+#define movd_r2m(reg, var) mmx_r2m(movd, reg, var)
+#define movd_r2r(regs, regd) mmx_r2r(movd, regs, regd)
+#define movd(vars, vard) \
+ __asm__ __volatile__ ("movd %1, %%mm0\n\t" \
+ "movd %%mm0, %0" \
+ : "=X" (vard) \
+ : "X" (vars))
+
+
+/* 2x32, 4x16, and 8x8 Parallel ADDs
+*/
+#define paddd_m2r(var, reg) mmx_m2r(paddd, var, reg)
+#define paddd_r2r(regs, regd) mmx_r2r(paddd, regs, regd)
+#define paddd(vars, vard) mmx_m2m(paddd, vars, vard)
+
+#define paddw_m2r(var, reg) mmx_m2r(paddw, var, reg)
+#define paddw_r2r(regs, regd) mmx_r2r(paddw, regs, regd)
+#define paddw(vars, vard) mmx_m2m(paddw, vars, vard)
+
+#define paddb_m2r(var, reg) mmx_m2r(paddb, var, reg)
+#define paddb_r2r(regs, regd) mmx_r2r(paddb, regs, regd)
+#define paddb(vars, vard) mmx_m2m(paddb, vars, vard)
+
+
+/* 4x16 and 8x8 Parallel ADDs using Saturation arithmetic
+*/
+#define paddsw_m2r(var, reg) mmx_m2r(paddsw, var, reg)
+#define paddsw_r2r(regs, regd) mmx_r2r(paddsw, regs, regd)
+#define paddsw(vars, vard) mmx_m2m(paddsw, vars, vard)
+
+#define paddsb_m2r(var, reg) mmx_m2r(paddsb, var, reg)
+#define paddsb_r2r(regs, regd) mmx_r2r(paddsb, regs, regd)
+#define paddsb(vars, vard) mmx_m2m(paddsb, vars, vard)
+
+
+/* 4x16 and 8x8 Parallel ADDs using Unsigned Saturation arithmetic
+*/
+#define paddusw_m2r(var, reg) mmx_m2r(paddusw, var, reg)
+#define paddusw_r2r(regs, regd) mmx_r2r(paddusw, regs, regd)
+#define paddusw(vars, vard) mmx_m2m(paddusw, vars, vard)
+
+#define paddusb_m2r(var, reg) mmx_m2r(paddusb, var, reg)
+#define paddusb_r2r(regs, regd) mmx_r2r(paddusb, regs, regd)
+#define paddusb(vars, vard) mmx_m2m(paddusb, vars, vard)
+
+
+/* 2x32, 4x16, and 8x8 Parallel SUBs
+*/
+#define psubd_m2r(var, reg) mmx_m2r(psubd, var, reg)
+#define psubd_r2r(regs, regd) mmx_r2r(psubd, regs, regd)
+#define psubd(vars, vard) mmx_m2m(psubd, vars, vard)
+
+#define psubw_m2r(var, reg) mmx_m2r(psubw, var, reg)
+#define psubw_r2r(regs, regd) mmx_r2r(psubw, regs, regd)
+#define psubw(vars, vard) mmx_m2m(psubw, vars, vard)
+
+#define psubb_m2r(var, reg) mmx_m2r(psubb, var, reg)
+#define psubb_r2r(regs, regd) mmx_r2r(psubb, regs, regd)
+#define psubb(vars, vard) mmx_m2m(psubb, vars, vard)
+
+
+/* 4x16 and 8x8 Parallel SUBs using Saturation arithmetic
+*/
+#define psubsw_m2r(var, reg) mmx_m2r(psubsw, var, reg)
+#define psubsw_r2r(regs, regd) mmx_r2r(psubsw, regs, regd)
+#define psubsw(vars, vard) mmx_m2m(psubsw, vars, vard)
+
+#define psubsb_m2r(var, reg) mmx_m2r(psubsb, var, reg)
+#define psubsb_r2r(regs, regd) mmx_r2r(psubsb, regs, regd)
+#define psubsb(vars, vard) mmx_m2m(psubsb, vars, vard)
+
+
+/* 4x16 and 8x8 Parallel SUBs using Unsigned Saturation arithmetic
+*/
+#define psubusw_m2r(var, reg) mmx_m2r(psubusw, var, reg)
+#define psubusw_r2r(regs, regd) mmx_r2r(psubusw, regs, regd)
+#define psubusw(vars, vard) mmx_m2m(psubusw, vars, vard)
+
+#define psubusb_m2r(var, reg) mmx_m2r(psubusb, var, reg)
+#define psubusb_r2r(regs, regd) mmx_r2r(psubusb, regs, regd)
+#define psubusb(vars, vard) mmx_m2m(psubusb, vars, vard)
+
+
+/* 4x16 Parallel MULs giving Low 4x16 portions of results
+*/
+#define pmullw_m2r(var, reg) mmx_m2r(pmullw, var, reg)
+#define pmullw_r2r(regs, regd) mmx_r2r(pmullw, regs, regd)
+#define pmullw(vars, vard) mmx_m2m(pmullw, vars, vard)
+
+
+/* 4x16 Parallel MULs giving High 4x16 portions of results
+*/
+#define pmulhw_m2r(var, reg) mmx_m2r(pmulhw, var, reg)
+#define pmulhw_r2r(regs, regd) mmx_r2r(pmulhw, regs, regd)
+#define pmulhw(vars, vard) mmx_m2m(pmulhw, vars, vard)
+
+
+/* 4x16->2x32 Parallel Mul-ADD
+ (muls like pmullw, then adds adjacent 16-bit fields
+ in the multiply result to make the final 2x32 result)
+*/
+#define pmaddwd_m2r(var, reg) mmx_m2r(pmaddwd, var, reg)
+#define pmaddwd_r2r(regs, regd) mmx_r2r(pmaddwd, regs, regd)
+#define pmaddwd(vars, vard) mmx_m2m(pmaddwd, vars, vard)
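+
+/* Usage sketch (illustrative, not part of the original header): a
+   4-element 16-bit dot product built from the wrappers above.  The
+   caller is responsible for issuing emms() before any FPU code. */
+static inline int dot4(const mmx_t *a, const mmx_t *b)
+{
+    mmx_t r;
+    movq_m2r(*a, mm7);          /* mm7 = a[0..3] (four words)         */
+    pmaddwd_m2r(*b, mm7);       /* mm7 = { a0*b0+a1*b1, a2*b2+a3*b3 } */
+    movq_r2m(mm7, r);
+    return r.d[0] + r.d[1];
+}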
+
+
+/* 1x64 bitwise AND
+*/
+#ifdef BROKEN_PAND
+#define pand_m2r(var, reg) \
+ { \
+ mmx_m2r(pandn, (mmx_t) -1LL, reg); \
+ mmx_m2r(pandn, var, reg); \
+ }
+#define pand_r2r(regs, regd) \
+ { \
+ mmx_m2r(pandn, (mmx_t) -1LL, regd); \
+ mmx_r2r(pandn, regs, regd) \
+ }
+#define pand(vars, vard) \
+ { \
+ movq_m2r(vard, mm0); \
+ mmx_m2r(pandn, (mmx_t) -1LL, mm0); \
+ mmx_m2r(pandn, vars, mm0); \
+ movq_r2m(mm0, vard); \
+ }
+#else
+#define pand_m2r(var, reg) mmx_m2r(pand, var, reg)
+#define pand_r2r(regs, regd) mmx_r2r(pand, regs, regd)
+#define pand(vars, vard) mmx_m2m(pand, vars, vard)
+#endif
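+
+/* (The BROKEN_PAND fallback above works because pandn computes
+   dst = ~dst & src: the first pandn against all-ones just inverts the
+   destination, so the second yields orig & var, i.e. the effect of a
+   plain pand.) */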
+
+
+/* 1x64 bitwise AND with Not the destination
+*/
+#define pandn_m2r(var, reg) mmx_m2r(pandn, var, reg)
+#define pandn_r2r(regs, regd) mmx_r2r(pandn, regs, regd)
+#define pandn(vars, vard) mmx_m2m(pandn, vars, vard)
+
+
+/* 1x64 bitwise OR
+*/
+#define por_m2r(var, reg) mmx_m2r(por, var, reg)
+#define por_r2r(regs, regd) mmx_r2r(por, regs, regd)
+#define por(vars, vard) mmx_m2m(por, vars, vard)
+
+
+/* 1x64 bitwise eXclusive OR
+*/
+#define pxor_m2r(var, reg) mmx_m2r(pxor, var, reg)
+#define pxor_r2r(regs, regd) mmx_r2r(pxor, regs, regd)
+#define pxor(vars, vard) mmx_m2m(pxor, vars, vard)
+
+
+/* 2x32, 4x16, and 8x8 Parallel CoMPare for EQuality
+ (resulting fields are either 0 or -1)
+*/
+#define pcmpeqd_m2r(var, reg) mmx_m2r(pcmpeqd, var, reg)
+#define pcmpeqd_r2r(regs, regd) mmx_r2r(pcmpeqd, regs, regd)
+#define pcmpeqd(vars, vard) mmx_m2m(pcmpeqd, vars, vard)
+
+#define pcmpeqw_m2r(var, reg) mmx_m2r(pcmpeqw, var, reg)
+#define pcmpeqw_r2r(regs, regd) mmx_r2r(pcmpeqw, regs, regd)
+#define pcmpeqw(vars, vard) mmx_m2m(pcmpeqw, vars, vard)
+
+#define pcmpeqb_m2r(var, reg) mmx_m2r(pcmpeqb, var, reg)
+#define pcmpeqb_r2r(regs, regd) mmx_r2r(pcmpeqb, regs, regd)
+#define pcmpeqb(vars, vard) mmx_m2m(pcmpeqb, vars, vard)
+
+
+/* 2x32, 4x16, and 8x8 Parallel CoMPare for Greater Than
+ (resulting fields are either 0 or -1)
+*/
+#define pcmpgtd_m2r(var, reg) mmx_m2r(pcmpgtd, var, reg)
+#define pcmpgtd_r2r(regs, regd) mmx_r2r(pcmpgtd, regs, regd)
+#define pcmpgtd(vars, vard) mmx_m2m(pcmpgtd, vars, vard)
+
+#define pcmpgtw_m2r(var, reg) mmx_m2r(pcmpgtw, var, reg)
+#define pcmpgtw_r2r(regs, regd) mmx_r2r(pcmpgtw, regs, regd)
+#define pcmpgtw(vars, vard) mmx_m2m(pcmpgtw, vars, vard)
+
+#define pcmpgtb_m2r(var, reg) mmx_m2r(pcmpgtb, var, reg)
+#define pcmpgtb_r2r(regs, regd) mmx_r2r(pcmpgtb, regs, regd)
+#define pcmpgtb(vars, vard) mmx_m2m(pcmpgtb, vars, vard)
+
+
+/* 1x64, 2x32, and 4x16 Parallel Shift Left Logical
+*/
+#define psllq_i2r(imm, reg) mmx_i2r(psllq, imm, reg)
+#define psllq_m2r(var, reg) mmx_m2r(psllq, var, reg)
+#define psllq_r2r(regs, regd) mmx_r2r(psllq, regs, regd)
+#define psllq(vars, vard) mmx_m2m(psllq, vars, vard)
+
+#define pslld_i2r(imm, reg) mmx_i2r(pslld, imm, reg)
+#define pslld_m2r(var, reg) mmx_m2r(pslld, var, reg)
+#define pslld_r2r(regs, regd) mmx_r2r(pslld, regs, regd)
+#define pslld(vars, vard) mmx_m2m(pslld, vars, vard)
+
+#define psllw_i2r(imm, reg) mmx_i2r(psllw, imm, reg)
+#define psllw_m2r(var, reg) mmx_m2r(psllw, var, reg)
+#define psllw_r2r(regs, regd) mmx_r2r(psllw, regs, regd)
+#define psllw(vars, vard) mmx_m2m(psllw, vars, vard)
+
+
+/* 1x64, 2x32, and 4x16 Parallel Shift Right Logical
+*/
+#define psrlq_i2r(imm, reg) mmx_i2r(psrlq, imm, reg)
+#define psrlq_m2r(var, reg) mmx_m2r(psrlq, var, reg)
+#define psrlq_r2r(regs, regd) mmx_r2r(psrlq, regs, regd)
+#define psrlq(vars, vard) mmx_m2m(psrlq, vars, vard)
+
+#define psrld_i2r(imm, reg) mmx_i2r(psrld, imm, reg)
+#define psrld_m2r(var, reg) mmx_m2r(psrld, var, reg)
+#define psrld_r2r(regs, regd) mmx_r2r(psrld, regs, regd)
+#define psrld(vars, vard) mmx_m2m(psrld, vars, vard)
+
+#define psrlw_i2r(imm, reg) mmx_i2r(psrlw, imm, reg)
+#define psrlw_m2r(var, reg) mmx_m2r(psrlw, var, reg)
+#define psrlw_r2r(regs, regd) mmx_r2r(psrlw, regs, regd)
+#define psrlw(vars, vard) mmx_m2m(psrlw, vars, vard)
+
+
+/* 2x32 and 4x16 Parallel Shift Right Arithmetic
+*/
+#define psrad_i2r(imm, reg) mmx_i2r(psrad, imm, reg)
+#define psrad_m2r(var, reg) mmx_m2r(psrad, var, reg)
+#define psrad_r2r(regs, regd) mmx_r2r(psrad, regs, regd)
+#define psrad(vars, vard) mmx_m2m(psrad, vars, vard)
+
+#define psraw_i2r(imm, reg) mmx_i2r(psraw, imm, reg)
+#define psraw_m2r(var, reg) mmx_m2r(psraw, var, reg)
+#define psraw_r2r(regs, regd) mmx_r2r(psraw, regs, regd)
+#define psraw(vars, vard) mmx_m2m(psraw, vars, vard)
+
+
+/* 2x32->4x16 and 4x16->8x8 PACK and Signed Saturate
+ (packs source and dest fields into dest in that order)
+*/
+#define packssdw_m2r(var, reg) mmx_m2r(packssdw, var, reg)
+#define packssdw_r2r(regs, regd) mmx_r2r(packssdw, regs, regd)
+#define packssdw(vars, vard) mmx_m2m(packssdw, vars, vard)
+
+#define packsswb_m2r(var, reg) mmx_m2r(packsswb, var, reg)
+#define packsswb_r2r(regs, regd) mmx_r2r(packsswb, regs, regd)
+#define packsswb(vars, vard) mmx_m2m(packsswb, vars, vard)
+
+
+/* 4x16->8x8 PACK and Unsigned Saturate
+ (packs source and dest fields into dest in that order)
+*/
+#define packuswb_m2r(var, reg) mmx_m2r(packuswb, var, reg)
+#define packuswb_r2r(regs, regd) mmx_r2r(packuswb, regs, regd)
+#define packuswb(vars, vard) mmx_m2m(packuswb, vars, vard)
+
+
+/* 2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK Low
+ (interleaves low half of dest with low half of source
+ as padding in each result field)
+*/
+#define punpckldq_m2r(var, reg) mmx_m2r(punpckldq, var, reg)
+#define punpckldq_r2r(regs, regd) mmx_r2r(punpckldq, regs, regd)
+#define punpckldq(vars, vard) mmx_m2m(punpckldq, vars, vard)
+
+#define punpcklwd_m2r(var, reg) mmx_m2r(punpcklwd, var, reg)
+#define punpcklwd_r2r(regs, regd) mmx_r2r(punpcklwd, regs, regd)
+#define punpcklwd(vars, vard) mmx_m2m(punpcklwd, vars, vard)
+
+#define punpcklbw_m2r(var, reg) mmx_m2r(punpcklbw, var, reg)
+#define punpcklbw_r2r(regs, regd) mmx_r2r(punpcklbw, regs, regd)
+#define punpcklbw(vars, vard) mmx_m2m(punpcklbw, vars, vard)
+
+
+/* 2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK High
+ (interleaves high half of dest with high half of source
+ as padding in each result field)
+*/
+#define punpckhdq_m2r(var, reg) mmx_m2r(punpckhdq, var, reg)
+#define punpckhdq_r2r(regs, regd) mmx_r2r(punpckhdq, regs, regd)
+#define punpckhdq(vars, vard) mmx_m2m(punpckhdq, vars, vard)
+
+#define punpckhwd_m2r(var, reg) mmx_m2r(punpckhwd, var, reg)
+#define punpckhwd_r2r(regs, regd) mmx_r2r(punpckhwd, regs, regd)
+#define punpckhwd(vars, vard) mmx_m2m(punpckhwd, vars, vard)
+
+#define punpckhbw_m2r(var, reg) mmx_m2r(punpckhbw, var, reg)
+#define punpckhbw_r2r(regs, regd) mmx_r2r(punpckhbw, regs, regd)
+#define punpckhbw(vars, vard) mmx_m2m(punpckhbw, vars, vard)
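+
+/* A common idiom built on the unpacks above (illustrative): zero-extend
+   8 bytes in mm0 to 8 words by interleaving with a zeroed register:
+
+       pxor_r2r(mm7, mm7);         // mm7 = 0
+       movq_r2r(mm0, mm1);         // keep a copy of the bytes
+       punpcklbw_r2r(mm7, mm0);    // mm0 = low  4 bytes as words
+       punpckhbw_r2r(mm7, mm1);    // mm1 = high 4 bytes as words
+
+   the SAD routines in sad_mmx.s use this pattern to widen absolute
+   differences before accumulating them. */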
+
+
+/* Empty MMx State
+ (used to clean-up when going from mmx to float use
+ of the registers that are shared by both; note that
+ there is no float-to-mmx operation needed, because
+ only the float tag word info is corruptible)
+*/
+#ifdef MMX_TRACE
+
+#define emms() \
+ { \
+ fprintf(stderr, "emms()\n"); \
+ __asm__ __volatile__ ("emms"); \
+ }
+
+#else
+
+#define emms() __asm__ __volatile__ ("emms")
+
+#endif
+
+#endif
+
diff --git a/libavcodec/i386/sad_mmx.s b/libavcodec/i386/sad_mmx.s
new file mode 100644
index 0000000000..b512639c25
--- /dev/null
+++ b/libavcodec/i386/sad_mmx.s
@@ -0,0 +1,798 @@
+; MMX/SSE optimized routines for SAD of 16*16 macroblocks
+; Copyright (C) Juan J. Sierralta P. <juanjo@atmlab.utfsm.cl>
+;
+; dist1_* Original Copyright (C) 2000 Chris Atenasio <chris@crud.net>
+; Enhancements and rest Copyright (C) 2000 Andrew Stevens <as@comlab.ox.ac.uk>
+
+;
+; This program is free software; you can redistribute it and/or
+; modify it under the terms of the GNU General Public License
+; as published by the Free Software Foundation; either version 2
+; of the License, or (at your option) any later version.
+;
+; This program is distributed in the hope that it will be useful,
+; but WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with this program; if not, write to the Free Software
+; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+;
+
+global pix_abs16x16_mmx
+
+; int pix_abs16x16_mmx(unsigned char *pix1,unsigned char *pix2, int lx, int h);
+; esi = p1 (init: blk1)
+; edi = p2 (init: blk2)
+; ecx = rowsleft (init: h)
+; edx = lx;
+
+; mm0 = distance accumulators (4 words)
+; mm1 = distance accumulators (4 words)
+; mm2 = temp
+; mm3 = temp
+; mm4 = temp
+; mm5 = temp
+; mm6 = 0
+; mm7 = temp
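+
+; Scalar reference of what this routine computes (illustrative C):
+;
+;     int sad = 0;
+;     for (y = 0; y < h; y++, pix1 += lx, pix2 += lx)
+;         for (x = 0; x < 16; x++)
+;             sad += abs(pix1[x] - pix2[x]);
+;     return sad;
+;
+; The loop below processes two rows per iteration, so h is assumed even.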
+
+
+align 32
+pix_abs16x16_mmx:
+ push ebp ; save frame pointer
+ mov ebp, esp
+
+	push ebx		; save registers (ebx/esi/edi are callee-saved
+	push ecx		; under the x86 GCC calling convention)
+	push edx
+ push esi
+ push edi
+
+	pxor mm0, mm0		; zero accumulators
+ pxor mm1, mm1
+ pxor mm6, mm6
+ mov esi, [ebp+8] ; get pix1
+ mov edi, [ebp+12] ; get pix2
+ mov edx, [ebp+16] ; get lx
+ mov ecx, [ebp+20] ; get rowsleft
+ jmp .nextrow
+align 32
+
+.nextrow:
+ ; First 8 bytes of the row
+
+ movq mm4, [edi] ; load first 8 bytes of pix2 row
+ movq mm5, [esi] ; load first 8 bytes of pix1 row
+ movq mm3, mm4 ; mm4 := abs(mm4-mm5)
+ movq mm2,[esi+8] ; load last 8 bytes of pix1 row
+ psubusb mm4, mm5
+ movq mm7,[edi+8] ; load last 8 bytes of pix2 row
+ psubusb mm5, mm3
+ por mm4, mm5
+
+ ; Last 8 bytes of the row
+
+ movq mm3, mm7 ; mm7 := abs(mm7-mm2)
+ psubusb mm7, mm2
+ psubusb mm2, mm3
+ por mm7, mm2
+
+ ; Now mm4 and mm7 have 16 absdiffs to add
+
+	; First 8 bytes of row 2
+
+
+ add edi, edx
+ movq mm2, [edi] ; load first 8 bytes of pix2 row
+ add esi, edx
+ movq mm5, [esi] ; load first 8 bytes of pix1 row
+
+
+
+ movq mm3, mm2 ; mm2 := abs(mm2-mm5)
+ psubusb mm2, mm5
+ movq mm6,[esi+8] ; load last 8 bytes of pix1 row
+ psubusb mm5, mm3
+ por mm2, mm5
+
+	; Last 8 bytes of row 2
+
+ movq mm5,[edi+8] ; load last 8 bytes of pix2 row
+
+
+ movq mm3, mm5 ; mm5 := abs(mm5-mm6)
+ psubusb mm5, mm6
+ psubusb mm6, mm3
+ por mm5, mm6
+
+ ; Now mm2, mm4, mm5, mm7 have 32 absdiffs
+
+ movq mm3, mm7
+
+ pxor mm6, mm6 ; Zero mm6
+
+ punpcklbw mm3, mm6 ; Unpack to words and add
+ punpckhbw mm7, mm6
+ paddusw mm7, mm3
+
+ movq mm3, mm5
+
+ punpcklbw mm3, mm6 ; Unpack to words and add
+ punpckhbw mm5, mm6
+ paddusw mm5, mm3
+
+	paddusw mm0, mm7	; Add to the accumulator (mm0)
+	paddusw mm1, mm5	; Add to the accumulator (mm1)
+
+ movq mm3, mm4
+
+ punpcklbw mm3, mm6 ; Unpack to words and add
+ punpckhbw mm4, mm6
+ movq mm5, mm2
+ paddusw mm4, mm3
+
+
+
+ punpcklbw mm5, mm6 ; Unpack to words and add
+ punpckhbw mm2, mm6
+ paddusw mm2, mm5
+
+ ; Loop termination
+
+ add esi, edx ; update pointers to next row
+	paddusw mm0, mm4	; Add to the accumulator (mm0)
+	add edi, edx
+	sub ecx, 2
+	paddusw mm1, mm2	; Add to the accumulator (mm1)
+ test ecx, ecx ; check rowsleft
+ jnz near .nextrow
+
+ paddusw mm0, mm1
+ movq mm2, mm0 ; Copy mm0 to mm2
+ psrlq mm2, 32
+ paddusw mm0, mm2 ; Add
+ movq mm3, mm0
+ psrlq mm3, 16
+ paddusw mm0, mm3
+ movd eax, mm0 ; Store return value
+ and eax, 0xffff
+
+ pop edi
+ pop esi
+ pop edx
+ pop ecx
+ pop ebx
+
+	pop ebp			; restore frame pointer
+
+ ;emms ; clear mmx registers
+ ret ; return
+
+global pix_abs16x16_sse
+
+; int pix_abs16x16_sse(unsigned char *pix1,unsigned char *pix2, int lx, int h);
+; esi = p1 (init: blk1)
+; edi = p2 (init: blk2)
+; ecx = rowsleft (init: h)
+; edx = lx;
+
+; mm0 = distance accumulators (4 words)
+; mm1 = distance accumulators (4 words)
+; mm2 = temp
+; mm3 = temp
+; mm4 = temp
+; mm5 = temp
+; mm6 = temp
+; mm7 = temp
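+
+; Same computation as pix_abs16x16_mmx, but the SSE psadbw instruction
+; does the subtract/abs/sum of 8 byte pairs in one step.  Scalar model
+; of "psadbw a, b" (illustrative):
+;
+;     a.word[0] = |a.byte[0]-b.byte[0]| + ... + |a.byte[7]-b.byte[7]|
+;     a.word[1..3] = 0
+;
+; Four rows are handled per loop iteration, so h is assumed to be a
+; multiple of 4.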
+
+
+align 32
+pix_abs16x16_sse:
+ push ebp ; save frame pointer
+ mov ebp, esp
+
+	push ebx		; save registers (ebx/esi/edi are callee-saved
+	push ecx		; under the x86 GCC calling convention)
+	push edx
+ push esi
+ push edi
+
+	pxor mm0, mm0		; zero accumulators
+ pxor mm1, mm1
+ mov esi, [ebp+8] ; get pix1
+ mov edi, [ebp+12] ; get pix2
+ mov edx, [ebp+16] ; get lx
+ mov ecx, [ebp+20] ; get rowsleft
+ jmp .next4row
+align 32
+
+.next4row:
+ ; First row
+
+ movq mm4, [edi] ; load first 8 bytes of pix2 row
+ movq mm5, [edi+8] ; load last 8 bytes of pix2 row
+ psadbw mm4, [esi] ; SAD of first 8 bytes
+ psadbw mm5, [esi+8] ; SAD of last 8 bytes
+	paddw mm0, mm4		; Add to accumulators
+ paddw mm1, mm5
+
+ ; Second row
+
+ add edi, edx;
+ add esi, edx;
+
+ movq mm6, [edi] ; load first 8 bytes of pix2 row
+ movq mm7, [edi+8] ; load last 8 bytes of pix2 row
+ psadbw mm6, [esi] ; SAD of first 8 bytes
+ psadbw mm7, [esi+8] ; SAD of last 8 bytes
+	paddw mm0, mm6		; Add to accumulators
+ paddw mm1, mm7
+
+ ; Third row
+
+ add edi, edx;
+ add esi, edx;
+
+ movq mm4, [edi] ; load first 8 bytes of pix2 row
+ movq mm5, [edi+8] ; load last 8 bytes of pix2 row
+ psadbw mm4, [esi] ; SAD of first 8 bytes
+ psadbw mm5, [esi+8] ; SAD of last 8 bytes
+	paddw mm0, mm4		; Add to accumulators
+ paddw mm1, mm5
+
+ ; Fourth row
+
+ add edi, edx;
+ add esi, edx;
+
+ movq mm6, [edi] ; load first 8 bytes of pix2 row
+ movq mm7, [edi+8] ; load last 8 bytes of pix2 row
+ psadbw mm6, [esi] ; SAD of first 8 bytes
+ psadbw mm7, [esi+8] ; SAD of last 8 bytes
+	paddw mm0, mm6		; Add to accumulators
+ paddw mm1, mm7
+
+ ; Loop termination
+
+ add esi, edx ; update pointers to next row
+ add edi, edx
+ sub ecx,4
+ test ecx, ecx ; check rowsleft
+ jnz near .next4row
+
+	paddd mm0, mm1		; Sum accumulators
+ movd eax, mm0 ; Store return value
+
+ pop edi
+ pop esi
+ pop edx
+ pop ecx
+ pop ebx
+
+	pop ebp			; restore frame pointer
+
+ ;emms ; clear mmx registers
+ ret ; return
+
+global pix_abs16x16_x2_mmx
+
+; int pix_abs16x16_x2_mmx(unsigned char *pix1,unsigned char *pix2, int lx, int h);
+; esi = p1 (init: blk1)
+; edi = p2 (init: blk2)
+; ecx = rowsleft (init: h)
+; edx = lx;
+
+; mm0 = distance accumulators (4 words)
+; mm1 = distance accumulators (4 words)
+; mm2 = temp
+; mm3 = temp
+; mm4 = temp
+; mm5 = temp
+; mm6 = 0
+; mm7 = temp
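+
+; Scalar reference (illustrative C): SAD against the horizontal half-pel
+; interpolation of pix2; the average truncates, matching the psrlw-by-1
+; below:
+;
+;     for (x = 0; x < 16; x++)
+;         sad += abs(pix1[x] - ((pix2[x] + pix2[x+1]) >> 1));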
+
+
+align 32
+pix_abs16x16_x2_mmx:
+ push ebp ; save frame pointer
+ mov ebp, esp
+
+	push ebx		; save registers (ebx/esi/edi are callee-saved
+	push ecx		; under the x86 GCC calling convention)
+	push edx
+ push esi
+ push edi
+
+	pxor mm0, mm0		; zero accumulators
+ pxor mm1, mm1
+ pxor mm6, mm6
+ mov esi, [ebp+8] ; get pix1
+ mov edi, [ebp+12] ; get pix2
+ mov edx, [ebp+16] ; get lx
+ mov ecx, [ebp+20] ; get rowsleft
+ jmp .nextrow_x2
+align 32
+
+.nextrow_x2:
+ ; First 8 bytes of the row
+
+ movq mm4, [edi] ; load first 8 bytes of pix2 row
+ movq mm5, [edi+1] ; load bytes 1-8 of pix2 row
+
+ movq mm2, mm4 ; copy mm4 on mm2
+ movq mm3, mm5 ; copy mm5 on mm3
+ punpcklbw mm4, mm6 ; first 4 bytes of [edi] on mm4
+ punpcklbw mm5, mm6 ; first 4 bytes of [edi+1] on mm5
+ paddusw mm4, mm5 ; mm4 := first 4 bytes interpolated in words
+ psrlw mm4, 1
+
+ punpckhbw mm2, mm6 ; last 4 bytes of [edi] on mm2
+ punpckhbw mm3, mm6 ; last 4 bytes of [edi+1] on mm3
+ paddusw mm2, mm3 ; mm2 := last 4 bytes interpolated in words
+ psrlw mm2, 1
+
+ packuswb mm4, mm2 ; pack 8 bytes interpolated on mm4
+ movq mm5,[esi] ; load first 8 bytes of pix1 row
+
+ movq mm3, mm4 ; mm4 := abs(mm4-mm5)
+ psubusb mm4, mm5
+ psubusb mm5, mm3
+ por mm4, mm5
+
+ ; Last 8 bytes of the row
+
+ movq mm7, [edi+8] ; load last 8 bytes of pix2 row
+	movq mm5, [edi+9]	; load bytes 9-16 of pix2 row
+
+ movq mm2, mm7 ; copy mm7 on mm2
+ movq mm3, mm5 ; copy mm5 on mm3
+ punpcklbw mm7, mm6 ; first 4 bytes of [edi+8] on mm7
+ punpcklbw mm5, mm6 ; first 4 bytes of [edi+9] on mm5
+	paddusw mm7, mm5	; mm7 := first 4 bytes interpolated in words
+	psrlw mm7, 1
+
+	punpckhbw mm2, mm6	; last 4 bytes of [edi+8] on mm2
+	punpckhbw mm3, mm6	; last 4 bytes of [edi+9] on mm3
+	paddusw mm2, mm3	; mm2 := last 4 bytes interpolated in words
+	psrlw mm2, 1
+
+	packuswb mm7, mm2	; pack 8 bytes interpolated on mm7
+	movq mm5,[esi+8]	; load last 8 bytes of pix1 row
+
+	movq mm3, mm7		; mm7 := abs(mm7-mm5)
+ psubusb mm7, mm5
+ psubusb mm5, mm3
+ por mm7, mm5
+
+ ; Now mm4 and mm7 have 16 absdiffs to add
+
+ movq mm3, mm4 ; Make copies of these bytes
+ movq mm2, mm7
+
+ punpcklbw mm4, mm6 ; Unpack to words and add
+ punpcklbw mm7, mm6
+ paddusw mm4, mm7
+	paddusw mm0, mm4	; Add to the accumulator (mm0)
+
+	punpckhbw mm3, mm6	; Unpack to words and add
+	punpckhbw mm2, mm6
+	paddusw mm3, mm2
+	paddusw mm1, mm3	; Add to the accumulator (mm1)
+
+ ; Loop termination
+
+ add esi, edx ; update pointers to next row
+ add edi, edx
+
+ sub ecx,1
+ test ecx, ecx ; check rowsleft
+ jnz near .nextrow_x2
+
+ paddusw mm0, mm1
+
+ movq mm1, mm0 ; Copy mm0 to mm1
+ psrlq mm1, 32
+ paddusw mm0, mm1 ; Add
+ movq mm2, mm0
+ psrlq mm2, 16
+ paddusw mm0, mm2
+ movd eax, mm0 ; Store return value
+ and eax, 0xffff
+
+ pop edi
+ pop esi
+ pop edx
+ pop ecx
+ pop ebx
+
+	pop ebp			; restore frame pointer
+
+ emms ; clear mmx registers
+ ret ; return
+
+global pix_abs16x16_y2_mmx
+
+; int pix_abs16x16_y2_mmx(unsigned char *pix1,unsigned char *pix2, int lx, int h);
+; esi = p1 (init: blk1)
+; edi = p2 (init: blk2)
+; ebx = p2 + lx
+; ecx = rowsleft (init: h)
+; edx = lx;
+
+; mm0 = distance accumulators (4 words)
+; mm1 = distance accumulators (4 words)
+; mm2 = temp
+; mm3 = temp
+; mm4 = temp
+; mm5 = temp
+; mm6 = 0
+; mm7 = temp
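+
+; Scalar reference (illustrative C): SAD against the vertical half-pel
+; interpolation of pix2 (ebx walks the row below edi); the average
+; truncates, matching the psrlw-by-1 below:
+;
+;     for (x = 0; x < 16; x++)
+;         sad += abs(pix1[x] - ((pix2[x] + pix2[x+lx]) >> 1));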
+
+
+align 32
+pix_abs16x16_y2_mmx:
+ push ebp ; save frame pointer
+ mov ebp, esp
+
+	push ebx		; save registers (ebx/esi/edi are callee-saved
+	push ecx		; under the x86 GCC calling convention)
+	push edx
+ push esi
+ push edi
+
+	pxor mm0, mm0		; zero accumulators
+ pxor mm1, mm1
+ pxor mm6, mm6
+ mov esi, [ebp+8] ; get pix1
+ mov edi, [ebp+12] ; get pix2
+ mov edx, [ebp+16] ; get lx
+ mov ecx, [ebp+20] ; get rowsleft
+ mov ebx, edi
+ add ebx, edx
+ jmp .nextrow_y2
+align 32
+
+.nextrow_y2:
+ ; First 8 bytes of the row
+
+ movq mm4, [edi] ; load first 8 bytes of pix2 row
+	movq mm5, [ebx]		; load first 8 bytes of pix2 row below (pix2+lx)
+
+ movq mm2, mm4 ; copy mm4 on mm2
+ movq mm3, mm5 ; copy mm5 on mm3
+ punpcklbw mm4, mm6 ; first 4 bytes of [edi] on mm4
+ punpcklbw mm5, mm6 ; first 4 bytes of [ebx] on mm5
+ paddusw mm4, mm5 ; mm4 := first 4 bytes interpolated in words
+ psrlw mm4, 1
+
+ punpckhbw mm2, mm6 ; last 4 bytes of [edi] on mm2
+	punpckhbw mm3, mm6	; last 4 bytes of [ebx] on mm3
+ paddusw mm2, mm3 ; mm2 := last 4 bytes interpolated in words
+ psrlw mm2, 1
+
+ packuswb mm4, mm2 ; pack 8 bytes interpolated on mm4
+ movq mm5,[esi] ; load first 8 bytes of pix1 row
+
+ movq mm3, mm4 ; mm4 := abs(mm4-mm5)
+ psubusb mm4, mm5
+ psubusb mm5, mm3
+ por mm4, mm5
+
+ ; Last 8 bytes of the row
+
+ movq mm7, [edi+8] ; load last 8 bytes of pix2 row
+	movq mm5, [ebx+8]	; load last 8 bytes of pix2 row below
+
+ movq mm2, mm7 ; copy mm7 on mm2
+ movq mm3, mm5 ; copy mm5 on mm3
+ punpcklbw mm7, mm6 ; first 4 bytes of [edi+8] on mm7
+ punpcklbw mm5, mm6 ; first 4 bytes of [ebx+8] on mm5
+	paddusw mm7, mm5	; mm7 := first 4 bytes interpolated in words
+ psrlw mm7, 1
+
+ punpckhbw mm2, mm6 ; last 4 bytes of [edi+8] on mm2
+ punpckhbw mm3, mm6 ; last 4 bytes of [ebx+8] on mm3
+ paddusw mm2, mm3 ; mm2 := last 4 bytes interpolated in words
+ psrlw mm2, 1
+
+	packuswb mm7, mm2	; pack 8 bytes interpolated on mm7
+	movq mm5,[esi+8]	; load last 8 bytes of pix1 row
+
+	movq mm3, mm7		; mm7 := abs(mm7-mm5)
+ psubusb mm7, mm5
+ psubusb mm5, mm3
+ por mm7, mm5
+
+ ; Now mm4 and mm7 have 16 absdiffs to add
+
+ movq mm3, mm4 ; Make copies of these bytes
+ movq mm2, mm7
+
+ punpcklbw mm4, mm6 ; Unpack to words and add
+ punpcklbw mm7, mm6
+ paddusw mm4, mm7
+	paddusw mm0, mm4	; Add to the accumulator (mm0)
+
+	punpckhbw mm3, mm6	; Unpack to words and add
+	punpckhbw mm2, mm6
+	paddusw mm3, mm2
+	paddusw mm1, mm3	; Add to the accumulator (mm1)
+
+ ; Loop termination
+
+ add esi, edx ; update pointers to next row
+ add edi, edx
+ add ebx, edx
+ sub ecx,1
+ test ecx, ecx ; check rowsleft
+ jnz near .nextrow_y2
+
+ paddusw mm0, mm1
+
+ movq mm1, mm0 ; Copy mm0 to mm1
+ psrlq mm1, 32
+ paddusw mm0, mm1 ; Add
+ movq mm2, mm0
+ psrlq mm2, 16
+ paddusw mm0, mm2
+ movd eax, mm0 ; Store return value
+ and eax, 0xffff
+
+ pop edi
+ pop esi
+ pop edx
+ pop ecx
+ pop ebx
+
+	pop ebp			; restore frame pointer
+
+ emms ; clear mmx registers
+ ret ; return
+
+global pix_abs16x16_xy2_mmx
+
+; int pix_abs16x16_xy2_mmx(unsigned char *p1,unsigned char *p2,int lx,int h);
+
+; esi = p1 (init: blk1)
+; edi = p2 (init: blk2)
+; ebx = p1+lx
+; ecx = rowsleft (init: h)
+; edx = lx;
+
+; mm0 = distance accumulators (4 words)
+; mm1 = bytes p2
+; mm2 = bytes p1
+; mm3 = bytes p1+lx
+; I'd love to find someplace to stash p1+1 and p1+lx+1's bytes
+; but I don't think that's going to happen in IA32-land...
+; mm4 = temp 4 bytes in words interpolating p1, p1+1
+; mm5 = temp 4 bytes in words from p2
+; mm6 = temp comparison bit mask p1,p2
+; mm7 = temp comparison bit mask p2,p1
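+
+; Scalar reference (illustrative C), using the register naming above
+; (p1 = esi = the block that gets interpolated); the average truncates,
+; matching the psrlw-by-2 below:
+;
+;     for (x = 0; x < 16; x++)
+;         sad += abs(p2[x] -
+;                    ((p1[x] + p1[x+1] + p1[x+lx] + p1[x+lx+1]) >> 2));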
+
+
+align 32
+pix_abs16x16_xy2_mmx:
+	push ebp		; save frame pointer
+	mov ebp, esp		; so that we can do this
+
+	push ebx		; save registers (ebx/esi/edi are callee-saved
+	push ecx		; under the x86 GCC calling convention)
+	push edx
+ push esi
+ push edi
+
+	pxor mm0, mm0		; zero accumulator
+
+	mov esi, [ebp+12]	; get p1 (note: loaded from the *second* C arg,
+	mov edi, [ebp+8]	; get p2  so that, as in the other routines, it is
+				;  the second argument that gets interpolated)
+ mov edx, [ebp+16] ; get lx
+ mov ecx, [ebp+20] ; rowsleft := h
+ mov ebx, esi
+ add ebx, edx
+ jmp .nextrowmm11 ; snap to it
+align 32
+.nextrowmm11:
+
+ ;;
+ ;; First 8 bytes of row
+ ;;
+
+ ;; First 4 bytes of 8
+
+ movq mm4, [esi] ; mm4 := first 4 bytes p1
+ pxor mm7, mm7
+ movq mm2, mm4 ; mm2 records all 8 bytes
+ punpcklbw mm4, mm7 ; First 4 bytes p1 in Words...
+
+ movq mm6, [ebx] ; mm6 := first 4 bytes p1+lx
+ movq mm3, mm6 ; mm3 records all 8 bytes
+ punpcklbw mm6, mm7
+ paddw mm4, mm6
+
+
+ movq mm5, [esi+1] ; mm5 := first 4 bytes p1+1
+ punpcklbw mm5, mm7 ; First 4 bytes p1 in Words...
+ paddw mm4, mm5
+ movq mm6, [ebx+1] ; mm6 := first 4 bytes p1+lx+1
+ punpcklbw mm6, mm7
+ paddw mm4, mm6
+
+ psrlw mm4, 2 ; mm4 := First 4 bytes interpolated in words
+
+ movq mm5, [edi] ; mm5:=first 4 bytes of p2 in words
+ movq mm1, mm5
+ punpcklbw mm5, mm7
+
+ movq mm7,mm4
+ pcmpgtw mm7,mm5 ; mm7 := [i : W0..3,mm4>mm5]
+
+ movq mm6,mm4 ; mm6 := [i : W0..3, (mm4-mm5)*(mm4-mm5 > 0)]
+ psubw mm6,mm5
+ pand mm6, mm7
+
+ paddw mm0, mm6 ; Add to accumulator
+
+ movq mm6,mm5 ; mm6 := [i : W0..3,mm5>mm4]
+ pcmpgtw mm6,mm4
+	psubw mm5,mm4		; mm5 := [i : W0..3, (mm5-mm4)*(mm5-mm4 > 0)]
+ pand mm5, mm6
+
+ paddw mm0, mm5 ; Add to accumulator
+
+ ;; Second 4 bytes of 8
+
+ movq mm4, mm2 ; mm4 := Second 4 bytes p1 in words
+ pxor mm7, mm7
+ punpckhbw mm4, mm7
+	movq mm6, mm3		; mm6 := second 4 bytes p1+lx in words
+	punpckhbw mm6, mm7
+	paddw mm4, mm6
+
+	movq mm5, [esi+1]	; mm5 := second 4 bytes p1+1
+	punpckhbw mm5, mm7	; second 4 bytes p1+1 in words...
+	paddw mm4, mm5
+	movq mm6, [ebx+1]	; mm6 := second 4 bytes p1+lx+1
+	punpckhbw mm6, mm7
+	paddw mm4, mm6
+
+	psrlw mm4, 2		; mm4 := second 4 bytes interpolated in words
+
+ movq mm5, mm1 ; mm5:= second 4 bytes of p2 in words
+ punpckhbw mm5, mm7
+
+ movq mm7,mm4
+ pcmpgtw mm7,mm5 ; mm7 := [i : W0..3,mm4>mm5]
+
+ movq mm6,mm4 ; mm6 := [i : W0..3, (mm4-mm5)*(mm4-mm5 > 0)]
+ psubw mm6,mm5
+ pand mm6, mm7
+
+ paddw mm0, mm6 ; Add to accumulator
+
+ movq mm6,mm5 ; mm6 := [i : W0..3,mm5>mm4]
+ pcmpgtw mm6,mm4
+	psubw mm5,mm4		; mm5 := [i : W0..3, (mm5-mm4)*(mm5-mm4 > 0)]
+ pand mm5, mm6
+
+ paddw mm0, mm5 ; Add to accumulator
+
+
+ ;;
+ ;; Second 8 bytes of row
+ ;;
+ ;; First 4 bytes of 8
+
+ movq mm4, [esi+8] ; mm4 := first 4 bytes p1+8
+ pxor mm7, mm7
+ movq mm2, mm4 ; mm2 records all 8 bytes
+	punpcklbw mm4, mm7	; first 4 bytes p1+8 in words...
+
+ movq mm6, [ebx+8] ; mm6 := first 4 bytes p1+lx+8
+ movq mm3, mm6 ; mm3 records all 8 bytes
+ punpcklbw mm6, mm7
+ paddw mm4, mm6
+
+
+ movq mm5, [esi+9] ; mm5 := first 4 bytes p1+9
+	punpcklbw mm5, mm7	; first 4 bytes p1+9 in words...
+ paddw mm4, mm5
+ movq mm6, [ebx+9] ; mm6 := first 4 bytes p1+lx+9
+ punpcklbw mm6, mm7
+ paddw mm4, mm6
+
+ psrlw mm4, 2 ; mm4 := First 4 bytes interpolated in words
+
+ movq mm5, [edi+8] ; mm5:=first 4 bytes of p2+8 in words
+ movq mm1, mm5
+ punpcklbw mm5, mm7
+
+ movq mm7,mm4
+ pcmpgtw mm7,mm5 ; mm7 := [i : W0..3,mm4>mm5]
+
+ movq mm6,mm4 ; mm6 := [i : W0..3, (mm4-mm5)*(mm4-mm5 > 0)]
+ psubw mm6,mm5
+ pand mm6, mm7
+
+ paddw mm0, mm6 ; Add to accumulator
+
+ movq mm6,mm5 ; mm6 := [i : W0..3,mm5>mm4]
+ pcmpgtw mm6,mm4
+	psubw mm5,mm4		; mm5 := [i : W0..3, (mm5-mm4)*(mm5-mm4 > 0)]
+ pand mm5, mm6
+
+ paddw mm0, mm5 ; Add to accumulator
+
+ ;; Second 4 bytes of 8
+
+	movq mm4, mm2		; mm4 := second 4 bytes p1+8 in words
+	pxor mm7, mm7
+	punpckhbw mm4, mm7
+	movq mm6, mm3		; mm6 := second 4 bytes p1+lx+8 in words
+	punpckhbw mm6, mm7
+	paddw mm4, mm6
+
+	movq mm5, [esi+9]	; mm5 := second 4 bytes p1+9
+	punpckhbw mm5, mm7	; second 4 bytes p1+9 in words...
+	paddw mm4, mm5
+	movq mm6, [ebx+9]	; mm6 := second 4 bytes p1+lx+9
+	punpckhbw mm6, mm7
+	paddw mm4, mm6
+
+	psrlw mm4, 2		; mm4 := second 4 bytes interpolated in words
+
+	movq mm5, mm1		; mm5 := second 4 bytes of p2+8 in words
+ punpckhbw mm5, mm7
+
+ movq mm7,mm4
+ pcmpgtw mm7,mm5 ; mm7 := [i : W0..3,mm4>mm5]
+
+ movq mm6,mm4 ; mm6 := [i : W0..3, (mm4-mm5)*(mm4-mm5 > 0)]
+ psubw mm6,mm5
+ pand mm6, mm7
+
+ paddw mm0, mm6 ; Add to accumulator
+
+ movq mm6,mm5 ; mm6 := [i : W0..3,mm5>mm4]
+ pcmpgtw mm6,mm4
+	psubw mm5,mm4		; mm5 := [i : W0..3, (mm5-mm4)*(mm5-mm4 > 0)]
+ pand mm5, mm6
+
+ paddw mm0, mm5 ; Add to accumulator
+
+
+ ;;
+ ;; Loop termination condition... and stepping
+ ;;
+
+ add esi, edx ; update pointer to next row
+ add edi, edx ; ditto
+ add ebx, edx
+
+ sub ecx,1
+ test ecx, ecx ; check rowsleft
+ jnz near .nextrowmm11
+
+ ;; Sum the Accumulators
+ movq mm4, mm0
+ psrlq mm4, 32
+ paddw mm0, mm4
+ movq mm6, mm0
+ psrlq mm6, 16
+ paddw mm0, mm6
+ movd eax, mm0 ; store return value
+ and eax, 0xffff
+
+ pop edi
+ pop esi
+ pop edx
+ pop ecx
+ pop ebx
+
+	pop ebp			; restore frame pointer
+
+ emms ; clear mmx registers
+ ret ; we now return you to your regular programming
+
+