author    Loren Merritt <lorenm@u.washington.edu>  2005-10-27 06:45:29 +0000
committer Loren Merritt <lorenm@u.washington.edu>  2005-10-27 06:45:29 +0000
commit    a6624e21cbf9d943583c920666c0e40d3252ff4b (patch)
tree      fc7b35be132ef7f62922f820417bd7a18e8cd263 /libavcodec/i386/dsputil_h264_template_mmx.c
parent    f368375cef3283098ab4667129a3480e8f6ed68c (diff)
download  ffmpeg-a6624e21cbf9d943583c920666c0e40d3252ff4b.tar.gz
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
2-4% overall speedup. Originally committed as revision 4666 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/i386/dsputil_h264_template_mmx.c')
-rw-r--r--  libavcodec/i386/dsputil_h264_template_mmx.c | 249
 1 file changed, 215 insertions(+), 34 deletions(-)
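
For reference, the computation this template implements is H.264's bilinear chroma interpolation. The scalar equivalent below is a sketch reconstructed from the weight comments in the asm (A = xy - (8x+8y) + 64 = (8-x)(8-y), B = 8x - xy, C = 8y - xy, D = xy); the function name put_chroma_mc8_c is illustrative and not part of this patch:

#include <stdint.h>

/* Scalar reference for the 8-wide chroma MC: the four weights sum to 64,
 * hence the (+32) >> 6 rounding used in the bilinear path. */
static void put_chroma_mc8_c(uint8_t *dst, const uint8_t *src,
                             int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B =      x  * (8 - y);
    const int C = (8 - x) *      y;
    const int D =      x  *      y;
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] +
                      32) >> 6;
        dst += stride;
        src += stride;
    }
}

The y==0 and x==0 fast paths added by this patch are degenerate rows of the same formula: with one coordinate zero, the two surviving weights sum to 8, so the rounding becomes (+4) >> 3.
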
diff --git a/libavcodec/i386/dsputil_h264_template_mmx.c b/libavcodec/i386/dsputil_h264_template_mmx.c
index 4cd4d52d85..2cc94636e1 100644
--- a/libavcodec/i386/dsputil_h264_template_mmx.c
+++ b/libavcodec/i386/dsputil_h264_template_mmx.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>
+ * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
+ * Loren Merritt
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -18,22 +19,138 @@
/**
* MMX optimized version of (put|avg)_h264_chroma_mc8.
- * H264_CHROMA_MC8_TMPL must be defined to the desired function name and
- * H264_CHROMA_OP must be defined to empty for put and pavgb/pavgusb for avg.
+ * H264_CHROMA_MC8_TMPL must be defined to the desired function name
+ * H264_CHROMA_OP must be defined to empty for put and pavgb/pavgusb for avg
+ * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function
*/
static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
uint64_t AA __align8;
uint64_t DD __align8;
- unsigned long srcos = (long)src & 7;
- uint64_t sh1 __align8 = srcos * 8;
- uint64_t sh2 __align8 = 56 - sh1;
int i;
+ if(y==0 && x==0) {
+ /* no filter needed */
+ H264_CHROMA_MC8_MV0(dst, src, stride, h);
+ return;
+ }
+
assert(x<8 && y<8 && x>=0 && y>=0);
- asm volatile("movd %1, %%mm4\n\t"
- "movd %2, %%mm6\n\t"
+ if(y==0)
+ {
+ /* horizontal filter only */
+ asm volatile("movd %0, %%mm5\n\t"
+ "punpcklwd %%mm5, %%mm5\n\t"
+ "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x */
+ "movq %1, %%mm4\n\t"
+ "pxor %%mm7, %%mm7\n\t"
+ "psubw %%mm5, %%mm4\n\t" /* mm4 = A = 8-x */
+ : : "rm" (x), "m" (ff_pw_8));
+
+ for(i=0; i<h; i++) {
+ asm volatile(
+ /* mm0 = src[0..7], mm1 = src[1..8] */
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ : : "m" (src[0]), "m" (src[1]));
+
+ asm volatile(
+ /* [mm2,mm3] = A * src[0..7] */
+ "movq %%mm0, %%mm2\n\t"
+ "punpcklbw %%mm7, %%mm2\n\t"
+ "pmullw %%mm4, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "pmullw %%mm4, %%mm3\n\t"
+
+ /* [mm2,mm3] += B * src[1..8] */
+ "movq %%mm1, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "pmullw %%mm5, %%mm0\n\t"
+ "punpckhbw %%mm7, %%mm1\n\t"
+ "pmullw %%mm5, %%mm1\n\t"
+ "paddw %%mm0, %%mm2\n\t"
+ "paddw %%mm1, %%mm3\n\t"
+
+ /* dst[0..7] = pack(([mm2,mm3] + 4) >> 3) */
+ "paddw %1, %%mm2\n\t"
+ "paddw %1, %%mm3\n\t"
+ "psrlw $3, %%mm2\n\t"
+ "psrlw $3, %%mm3\n\t"
+ "packuswb %%mm3, %%mm2\n\t"
+ H264_CHROMA_OP(%0, %%mm2)
+ "movq %%mm2, %0\n\t"
+ : "=m" (dst[0]) : "m" (ff_pw_4));
+
+ src += stride;
+ dst += stride;
+ }
+ return;
+ }
+
+ if(x==0)
+ {
+ /* vertical filter only */
+ asm volatile("movd %0, %%mm6\n\t"
+ "punpcklwd %%mm6, %%mm6\n\t"
+ "punpckldq %%mm6, %%mm6\n\t" /* mm6 = C = y */
+ "movq %1, %%mm4\n\t"
+ "pxor %%mm7, %%mm7\n\t"
+ "psubw %%mm6, %%mm4\n\t" /* mm4 = A = 8-y */
+ : : "rm" (y), "m" (ff_pw_8));
+
+ asm volatile(
+ /* mm0 = src[0..7] */
+ "movq %0, %%mm0\n\t"
+ : : "m" (src[0]));
+
+ for(i=0; i<h; i++) {
+ asm volatile(
+ /* [mm2,mm3] = A * src[0..7] */
+ "movq %mm0, %mm2\n\t"
+ "punpcklbw %mm7, %mm2\n\t"
+ "pmullw %mm4, %mm2\n\t"
+ "movq %mm0, %mm3\n\t"
+ "punpckhbw %mm7, %mm3\n\t"
+ "pmullw %mm4, %mm3\n\t");
+
+ src += stride;
+ asm volatile(
+ /* mm0 = src[0..7] */
+ "movq %0, %%mm0\n\t"
+ : : "m" (src[0]));
+
+ asm volatile(
+ /* [mm2,mm3] += C * src[0..7] */
+ "movq %mm0, %mm1\n\t"
+ "punpcklbw %mm7, %mm1\n\t"
+ "pmullw %mm6, %mm1\n\t"
+ "paddw %mm1, %mm2\n\t"
+ "movq %mm0, %mm5\n\t"
+ "punpckhbw %mm7, %mm5\n\t"
+ "pmullw %mm6, %mm5\n\t"
+ "paddw %mm5, %mm3\n\t");
+
+ asm volatile(
+ /* dst[0..7] = pack(([mm2,mm3] + 4) >> 3) */
+ "paddw %1, %%mm2\n\t"
+ "paddw %1, %%mm3\n\t"
+ "psrlw $3, %%mm2\n\t"
+ "psrlw $3, %%mm3\n\t"
+ "packuswb %%mm3, %%mm2\n\t"
+ H264_CHROMA_OP(%0, %%mm2)
+ "movq %%mm2, %0\n\t"
+ : "=m" (dst[0]) : "m" (ff_pw_4));
+
+ dst += stride;
+ }
+ return;
+ }
+
+ /* general case, bilinear */
+ asm volatile("movd %2, %%mm4\n\t"
+ "movd %3, %%mm6\n\t"
"punpcklwd %%mm4, %%mm4\n\t"
"punpcklwd %%mm6, %%mm6\n\t"
"punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
@@ -44,29 +161,20 @@ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*
"psllw $3, %%mm6\n\t"
"movq %%mm5, %%mm7\n\t"
"paddw %%mm6, %%mm7\n\t"
- "movq %%mm4, %0\n\t" /* DD = x * y */
+ "movq %%mm4, %1\n\t" /* DD = x * y */
"psubw %%mm4, %%mm5\n\t" /* mm5 = B = 8x - xy */
"psubw %%mm4, %%mm6\n\t" /* mm6 = C = 8y - xy */
- "paddw %3, %%mm4\n\t"
+ "paddw %4, %%mm4\n\t"
"psubw %%mm7, %%mm4\n\t" /* mm4 = A = xy - (8x+8y) + 64 */
"pxor %%mm7, %%mm7\n\t"
- : "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));
+ "movq %%mm4, %0\n\t"
+ : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));
- asm volatile("movq %%mm4, %0" : "=m" (AA));
-
- src -= srcos;
asm volatile(
/* mm0 = src[0..7], mm1 = src[1..8] */
- "movq %0, %%mm1\n\t"
- "movq %1, %%mm0\n\t"
- "psrlq %2, %%mm1\n\t"
- "psllq %3, %%mm0\n\t"
- "movq %%mm0, %%mm4\n\t"
- "psllq $8, %%mm0\n\t"
- "por %%mm1, %%mm0\n\t"
- "psrlq $8, %%mm1\n\t"
- "por %%mm4, %%mm1\n\t"
- : : "m" (src[0]), "m" (src[8]), "m" (sh1), "m" (sh2));
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ : : "m" (src[0]), "m" (src[1]));
for(i=0; i<h; i++) {
asm volatile(
@@ -91,16 +199,9 @@ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*
src += stride;
asm volatile(
/* mm0 = src[0..7], mm1 = src[1..8] */
- "movq %0, %%mm1\n\t"
- "movq %1, %%mm0\n\t"
- "psrlq %2, %%mm1\n\t"
- "psllq %3, %%mm0\n\t"
- "movq %%mm0, %%mm4\n\t"
- "psllq $8, %%mm0\n\t"
- "por %%mm1, %%mm0\n\t"
- "psrlq $8, %%mm1\n\t"
- "por %%mm4, %%mm1\n\t"
- : : "m" (src[0]), "m" (src[8]), "m" (sh1), "m" (sh2));
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ : : "m" (src[0]), "m" (src[1]));
asm volatile(
/* [mm2,mm3] += C * src[0..7] */
@@ -138,3 +239,83 @@ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*
dst+= stride;
}
}
+
+static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ uint64_t AA __align8;
+ uint64_t DD __align8;
+ int i;
+
+ /* no special case for mv=(0,0) in 4x*, since it's much less common than in 8x*.
+ * could still save a few cycles, but maybe not worth the complexity. */
+
+ assert(x<8 && y<8 && x>=0 && y>=0);
+
+ asm volatile("movd %2, %%mm4\n\t"
+ "movd %3, %%mm6\n\t"
+ "punpcklwd %%mm4, %%mm4\n\t"
+ "punpcklwd %%mm6, %%mm6\n\t"
+ "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
+ "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
+ "movq %%mm4, %%mm5\n\t"
+ "pmullw %%mm6, %%mm4\n\t" /* mm4 = x * y */
+ "psllw $3, %%mm5\n\t"
+ "psllw $3, %%mm6\n\t"
+ "movq %%mm5, %%mm7\n\t"
+ "paddw %%mm6, %%mm7\n\t"
+ "movq %%mm4, %1\n\t" /* DD = x * y */
+ "psubw %%mm4, %%mm5\n\t" /* mm5 = B = 8x - xy */
+ "psubw %%mm4, %%mm6\n\t" /* mm6 = C = 8y - xy */
+ "paddw %4, %%mm4\n\t"
+ "psubw %%mm7, %%mm4\n\t" /* mm4 = A = xy - (8x+8y) + 64 */
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %%mm4, %0\n\t"
+ : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));
+
+ asm volatile(
+ /* mm0 = src[0..3], mm1 = src[1..4] */
+ "movd %0, %%mm0\n\t"
+ "movd %1, %%mm1\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ : : "m" (src[0]), "m" (src[1]));
+
+ for(i=0; i<h; i++) {
+ asm volatile(
+ /* mm2 = A * src[0..3] + B * src[1..4] */
+ "movq %%mm0, %%mm2\n\t"
+ "pmullw %0, %%mm2\n\t"
+ "pmullw %%mm5, %%mm1\n\t"
+ "paddw %%mm1, %%mm2\n\t"
+ : : "m" (AA));
+
+ src += stride;
+ asm volatile(
+ /* mm0 = src[0..3], mm1 = src[1..4] */
+ "movd %0, %%mm0\n\t"
+ "movd %1, %%mm1\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ : : "m" (src[0]), "m" (src[1]));
+
+ asm volatile(
+ /* mm2 += C * src[0..3] + D * src[1..4] */
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "pmullw %%mm6, %%mm3\n\t"
+ "pmullw %0, %%mm4\n\t"
+ "paddw %%mm3, %%mm2\n\t"
+ "paddw %%mm4, %%mm2\n\t"
+ : : "m" (DD));
+
+ asm volatile(
+ /* dst[0..3] = pack((mm2 + 32) >> 6) */
+ "paddw %1, %%mm2\n\t"
+ "psrlw $6, %%mm2\n\t"
+ "packuswb %%mm7, %%mm2\n\t"
+ H264_CHROMA_OP4(%0, %%mm2, %%mm3)
+ "movd %%mm2, %0\n\t"
+ : "=m" (dst[0]) : "m" (ff_pw_32));
+ dst += stride;
+ }
+}
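
For context, the macro contract described in the file's header comment implies an instantiation along these lines (a sketch only; the real definitions live in dsputil_mmx.c, and the three-operand H264_CHROMA_OP4 used by the 4-wide function may be spelled differently there):

/* put variants: the chroma op is empty, results are stored directly */
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

/* avg variants: average the computed pixels with what is already in dst.
 * pavgb requires MMX2; a 3DNow! build would substitute pavgusb. */
#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"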