path: root/libswscale/x86/swscale_template.c
author    Michael Niedermayer <michaelni@gmx.at>  2012-08-04 22:39:25 +0200
committer Michael Niedermayer <michaelni@gmx.at>  2012-08-04 23:51:43 +0200
commit e776ee8f294984f7643a3c45db803c7266e1edfd (patch)
tree   a1fd00ab7a0760ec0f2848aed9dc3f79d889e816 /libswscale/x86/swscale_template.c
parent 88fc1438c693ffb7793aeb111d89775440491840 (diff)
parent 8821ae649e61097ec57ca58472c3e4239c82913c (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  lavr: fix handling of custom mix matrices
  fate: force pix_fmt in lagarith-rgb32 test
  fate: add tests for lagarith lossless video codec.
  ARMv6: vp8: fix stack allocation with Apple's assembler
  ARM: vp56: allow inline asm to build with clang
  fft: 3dnow: fix register name typo in DECL_IMDCT macro
  x86: dct32: port to cpuflags
  x86: build: replace mmx2 by mmxext
  Revert "wmapro: prevent division by zero when sample rate is unspecified"
  wmapro: prevent division by zero when sample rate is unspecified
  lagarith: fix color plane inversion for YUY2 output.
  lagarith: pad RGB buffer by 1 byte.
  dsputil: make add_hfyu_left_prediction_sse4() support unaligned src.

Conflicts:
  doc/APIchanges
  libavcodec/lagarith.c
  libavfilter/x86/gradfun.c
  libavutil/cpu.h
  libavutil/version.h
  libswscale/utils.c
  libswscale/version.h
  libswscale/x86/yuv2rgb.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libswscale/x86/swscale_template.c')
-rw-r--r--  libswscale/x86/swscale_template.c  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/libswscale/x86/swscale_template.c b/libswscale/x86/swscale_template.c
index 245bfdeadd..370a0ebe1b 100644
--- a/libswscale/x86/swscale_template.c
+++ b/libswscale/x86/swscale_template.c
@@ -23,13 +23,13 @@
#undef MOVNTQ2
#undef PREFETCH
-#if COMPILE_TEMPLATE_MMX2
+#if COMPILE_TEMPLATE_MMXEXT
#define PREFETCH "prefetchnta"
#else
#define PREFETCH " # nop"
#endif
-#if COMPILE_TEMPLATE_MMX2
+#if COMPILE_TEMPLATE_MMXEXT
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#define MOVNTQ2 "movntq "
#else
@@ -38,7 +38,7 @@
#endif
#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
-#if !COMPILE_TEMPLATE_MMX2
+#if !COMPILE_TEMPLATE_MMXEXT
static av_always_inline void
dither_8to16(const uint8_t *srcDither, int rot)
{
@@ -641,7 +641,7 @@ static void RENAME(yuv2rgb555_X)(SwsContext *c, const int16_t *lumFilter,
"cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
-#if COMPILE_TEMPLATE_MMX2
+#if COMPILE_TEMPLATE_MMXEXT
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
#else
@@ -1445,7 +1445,7 @@ static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0,
}
}
-#if COMPILE_TEMPLATE_MMX2
+#if COMPILE_TEMPLATE_MMXEXT
static void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
int dstWidth, const uint8_t *src,
int srcW, int xInc)
@@ -1627,7 +1627,7 @@ static void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst1, int16_t *dst2,
dst2[i] = src2[srcW-1]*128;
}
}
-#endif /* COMPILE_TEMPLATE_MMX2 */
+#endif /* COMPILE_TEMPLATE_MMXEXT */
static av_cold void RENAME(sws_init_swScale)(SwsContext *c)
{
@@ -1691,17 +1691,17 @@ static av_cold void RENAME(sws_init_swScale)(SwsContext *c)
if (c->srcBpc == 8 && c->dstBpc <= 14) {
// Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
-#if COMPILE_TEMPLATE_MMX2
+#if COMPILE_TEMPLATE_MMXEXT
if (c->flags & SWS_FAST_BILINEAR && c->canMMX2BeUsed)
{
c->hyscale_fast = RENAME(hyscale_fast);
c->hcscale_fast = RENAME(hcscale_fast);
} else {
-#endif /* COMPILE_TEMPLATE_MMX2 */
+#endif /* COMPILE_TEMPLATE_MMXEXT */
c->hyscale_fast = NULL;
c->hcscale_fast = NULL;
-#if COMPILE_TEMPLATE_MMX2
+#if COMPILE_TEMPLATE_MMXEXT
}
-#endif /* COMPILE_TEMPLATE_MMX2 */
+#endif /* COMPILE_TEMPLATE_MMXEXT */
}
}
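
Context for the rename above: swscale_template.c is not compiled on its own; it is included more than once from the x86 swscale code, with COMPILE_TEMPLATE_MMXEXT (formerly COMPILE_TEMPLATE_MMX2) set to 0 or 1 and a RENAME() macro giving each copy distinct symbol names, which is why every #if/#endif guard in the template has to follow the rename. The sketch below is a minimal, self-contained illustration of that pattern, not FFmpeg code; the names SCALE_TEMPLATE, scale_row_c, scale_row_mmxext and the have_mmxext flag are invented for this example.

/* Toy version of the template-compilation pattern: one body, two builds,
 * selected at runtime the way sws_init_swScale() picks hyscale_fast. */
#include <stdio.h>

#define SCALE_TEMPLATE(suffix, use_prefetch)                          \
    static void scale_row_##suffix(const unsigned char *src, int n)   \
    {                                                                 \
        printf("scale_row_" #suffix ": prefetch=%d, first=%u\n",      \
               (use_prefetch), n ? src[0] : 0u);                      \
    }

SCALE_TEMPLATE(c, 0)       /* baseline build: PREFETCH would expand to a nop        */
SCALE_TEMPLATE(mmxext, 1)  /* MMXEXT build: PREFETCH would expand to "prefetchnta"  */

int main(void)
{
    const unsigned char row[4] = { 10, 20, 30, 40 };

    /* Runtime dispatch in the spirit of sws_init_swScale(): use the fast
     * variant only when the CPU flag is available (hard-coded here). */
    int have_mmxext = 1;
    void (*scale_row)(const unsigned char *, int) =
        have_mmxext ? scale_row_mmxext : scale_row_c;

    scale_row(row, (int)sizeof row);
    return 0;
}

In the real library the per-build switch additionally changes the generated assembly (PREFETCH, MOVNTQ) inside the inline-asm blocks, but the dispatch idea is the same: compile the template per instruction set, then assign the matching function pointers at init time.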