author     Matthieu Bouron <matthieu.bouron@stupeflix.com>  2016-06-27 17:21:04 +0200
committer  Matthieu Bouron <matthieu.bouron@stupeflix.com>  2016-06-27 17:21:18 +0200
commit     9eb3da2f9942cf1b1148d242bccfc383f666feb6 (patch)
tree       12a571ac330c6301fb68af2c2769a4c715cdf459 /libavcodec
parent     39d6d3618d48625decaff7d9bdbb45b44ef2a805 (diff)
download   ffmpeg-9eb3da2f9942cf1b1148d242bccfc383f666feb6.tar.gz
asm: FF_-prefix internal macros used in inline assembly
See merge commit '39d6d3618d48625decaff7d9bdbb45b44ef2a805'.
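The renamed macros live in libavutil/x86/asm.h and expand to register-name strings that C string-literal concatenation pastes into the inline-asm templates below. An abridged sketch of the FF_-prefixed definitions after this change (the real header covers the full general-purpose register set):

/* Abridged sketch of libavutil/x86/asm.h after the rename; the actual
 * header defines all registers and both calling-convention variants. */
#if ARCH_X86_64
#    define FF_OPSIZE   "q"     /* operand-size suffix: addq, subq, ...   */
#    define FF_REG_a    "rax"
#    define FF_REG_c    "rcx"
#    define FF_REG_d    "rdx"
#    define FF_REG_S    "rsi"
#    define FF_REG_D    "rdi"
#    define FF_PTR_SIZE "8"     /* sizeof(void *) as an asm-time constant */
#elif ARCH_X86_32
#    define FF_OPSIZE   "l"
#    define FF_REG_a    "eax"
#    define FF_REG_c    "ecx"
#    define FF_REG_d    "edx"
#    define FF_REG_S    "esi"
#    define FF_REG_D    "edi"
#    define FF_PTR_SIZE "4"
#endif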
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/x86/cabac.h                    20
-rw-r--r--  libavcodec/x86/h264_i386.h                12
-rw-r--r--  libavcodec/x86/hpeldsp_rnd_template.c     56
-rw-r--r--  libavcodec/x86/me_cmp_init.c              44
-rw-r--r--  libavcodec/x86/mpegvideo.c                88
-rw-r--r--  libavcodec/x86/mpegvideoenc_template.c    36
-rw-r--r--  libavcodec/x86/rnd_template.c             44
-rw-r--r--  libavcodec/x86/snowdsp.c                 180
-rw-r--r--  libavcodec/x86/vc1dsp_mmx.c                6
9 files changed, 243 insertions, 243 deletions
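For readers unfamiliar with the idiom: because the macros are plain string literals, one source line assembles to 32- or 64-bit instructions depending on the target. A hypothetical minimal example, not part of this patch (the helper name is invented for illustration):

/* Hypothetical example: on x86-32 the template pastes into
 * "xor %eax, %eax" / "addl $2, %0"; on x86-64 it becomes
 * "xor %rax, %rax" / "addq $2, %0". */
#include "libavutil/x86/asm.h"

static inline void bump_by_two(x86_reg *p)
{
    __asm__ volatile(
        "xor  %%"FF_REG_a", %%"FF_REG_a" \n\t" /* clear eax/rax      */
        "add"FF_OPSIZE"  $2, %0          \n\t" /* width-suffixed add */
        : "+m"(*p)
        :
        : "%"FF_REG_a, "memory");
}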
diff --git a/libavcodec/x86/cabac.h b/libavcodec/x86/cabac.h
index 4795f5bb07..cfd3b759c9 100644
--- a/libavcodec/x86/cabac.h
+++ b/libavcodec/x86/cabac.h
@@ -45,7 +45,7 @@
#define END_CHECK(end) ""
#else
#define END_CHECK(end) \
- "cmp "end" , %%"REG_c" \n\t"\
+ "cmp "end" , %%"FF_REG_c" \n\t"\
"jge 1f \n\t"
#endif
@@ -92,11 +92,11 @@
"mov "tmpbyte" , "statep" \n\t"\
"test "lowword" , "lowword" \n\t"\
"jnz 2f \n\t"\
- "mov "byte" , %%"REG_c" \n\t"\
+ "mov "byte" , %%"FF_REG_c" \n\t"\
END_CHECK(end)\
- "add"OPSIZE" $2 , "byte" \n\t"\
+ "add"FF_OPSIZE" $2 , "byte" \n\t"\
"1: \n\t"\
- "movzwl (%%"REG_c") , "tmp" \n\t"\
+ "movzwl (%%"FF_REG_c") , "tmp" \n\t"\
"lea -1("low") , %%ecx \n\t"\
"xor "low" , %%ecx \n\t"\
"shr $15 , %%ecx \n\t"\
@@ -153,11 +153,11 @@
"mov "tmpbyte" , "statep" \n\t"\
"test "lowword" , "lowword" \n\t"\
" jnz 2f \n\t"\
- "mov "byte" , %%"REG_c" \n\t"\
+ "mov "byte" , %%"FF_REG_c" \n\t"\
END_CHECK(end)\
- "add"OPSIZE" $2 , "byte" \n\t"\
+ "add"FF_OPSIZE" $2 , "byte" \n\t"\
"1: \n\t"\
- "movzwl (%%"REG_c") , "tmp" \n\t"\
+ "movzwl (%%"FF_REG_c") , "tmp" \n\t"\
"lea -1("low") , %%ecx \n\t"\
"xor "low" , %%ecx \n\t"\
"shr $15 , %%ecx \n\t"\
@@ -203,7 +203,7 @@ static av_always_inline int get_cabac_inline_x86(CABACContext *c,
"i"(offsetof(CABACContext, bytestream_end))
TABLES_ARG
,"1"(c->low), "2"(c->range)
- : "%"REG_c, "memory"
+ : "%"FF_REG_c, "memory"
);
return bit & 1;
}
@@ -240,7 +240,7 @@ static av_always_inline int get_cabac_bypass_sign_x86(CABACContext *c, int val)
"addl %%edx, %%eax \n\t"
"cmp %c5(%2), %1 \n\t"
"jge 1f \n\t"
- "add"OPSIZE" $2, %c4(%2) \n\t"
+ "add"FF_OPSIZE" $2, %c4(%2) \n\t"
#endif
"1: \n\t"
"movl %%eax, %c3(%2) \n\t"
@@ -281,7 +281,7 @@ static av_always_inline int get_cabac_bypass_x86(CABACContext *c)
"addl %%ecx, %%eax \n\t"
"cmp %c5(%2), %1 \n\t"
"jge 1f \n\t"
- "add"OPSIZE" $2, %c4(%2) \n\t"
+ "add"FF_OPSIZE" $2, %c4(%2) \n\t"
"1: \n\t"
"movl %%eax, %c3(%2) \n\t"
diff --git a/libavcodec/x86/h264_i386.h b/libavcodec/x86/h264_i386.h
index 4dfbc30933..19cd128381 100644
--- a/libavcodec/x86/h264_i386.h
+++ b/libavcodec/x86/h264_i386.h
@@ -91,13 +91,13 @@ static int decode_significance_x86(CABACContext *c, int max_coeff,
"sub %10, %1 \n\t"
"mov %2, %0 \n\t"
"movl %7, %%ecx \n\t"
- "add %1, %%"REG_c" \n\t"
+ "add %1, %%"FF_REG_c" \n\t"
"movl %%ecx, (%0) \n\t"
"test $1, %4 \n\t"
" jnz 5f \n\t"
- "add"OPSIZE" $4, %2 \n\t"
+ "add"FF_OPSIZE" $4, %2 \n\t"
"4: \n\t"
"add $1, %1 \n\t"
@@ -105,7 +105,7 @@ static int decode_significance_x86(CABACContext *c, int max_coeff,
" jb 3b \n\t"
"mov %2, %0 \n\t"
"movl %7, %%ecx \n\t"
- "add %1, %%"REG_c" \n\t"
+ "add %1, %%"FF_REG_c" \n\t"
"movl %%ecx, (%0) \n\t"
"5: \n\t"
"add %9, %k0 \n\t"
@@ -116,7 +116,7 @@ static int decode_significance_x86(CABACContext *c, int max_coeff,
"i"(offsetof(CABACContext, bytestream)),
"i"(offsetof(CABACContext, bytestream_end))
TABLES_ARG
- : "%"REG_c, "memory"
+ : "%"FF_REG_c, "memory"
);
return coeff_count;
}
@@ -183,7 +183,7 @@ static int decode_significance_8x8_x86(CABACContext *c,
"test $1, %4 \n\t"
" jnz 5f \n\t"
- "add"OPSIZE" $4, %2 \n\t"
+ "add"FF_OPSIZE" $4, %2 \n\t"
"4: \n\t"
"add $1, %6 \n\t"
@@ -202,7 +202,7 @@ static int decode_significance_8x8_x86(CABACContext *c,
"i"(offsetof(CABACContext, bytestream)),
"i"(offsetof(CABACContext, bytestream_end)),
"i"(H264_LAST_COEFF_FLAG_OFFSET_8x8_OFFSET) TABLES_ARG
- : "%"REG_c, "memory"
+ : "%"FF_REG_c, "memory"
);
return coeff_count;
}
diff --git a/libavcodec/x86/hpeldsp_rnd_template.c b/libavcodec/x86/hpeldsp_rnd_template.c
index e20d0658cd..2bff2d2766 100644
--- a/libavcodec/x86/hpeldsp_rnd_template.c
+++ b/libavcodec/x86/hpeldsp_rnd_template.c
@@ -32,7 +32,7 @@ av_unused static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels
{
MOVQ_BFE(mm6);
__asm__ volatile(
- "lea (%3, %3), %%"REG_a" \n\t"
+ "lea (%3, %3), %%"FF_REG_a" \n\t"
".p2align 3 \n\t"
"1: \n\t"
"movq (%1), %%mm0 \n\t"
@@ -42,8 +42,8 @@ av_unused static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels
PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
"movq %%mm4, (%2) \n\t"
"movq %%mm5, (%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
+ "add %%"FF_REG_a", %1 \n\t"
+ "add %%"FF_REG_a", %2 \n\t"
"movq (%1), %%mm0 \n\t"
"movq 1(%1), %%mm1 \n\t"
"movq (%1, %3), %%mm2 \n\t"
@@ -51,20 +51,20 @@ av_unused static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels
PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
"movq %%mm4, (%2) \n\t"
"movq %%mm5, (%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
+ "add %%"FF_REG_a", %1 \n\t"
+ "add %%"FF_REG_a", %2 \n\t"
"subl $4, %0 \n\t"
"jnz 1b \n\t"
:"+g"(h), "+S"(pixels), "+D"(block)
:"r"((x86_reg)line_size)
- :REG_a, "memory");
+ :FF_REG_a, "memory");
}
av_unused static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
MOVQ_BFE(mm6);
__asm__ volatile(
- "lea (%3, %3), %%"REG_a" \n\t"
+ "lea (%3, %3), %%"FF_REG_a" \n\t"
".p2align 3 \n\t"
"1: \n\t"
"movq (%1), %%mm0 \n\t"
@@ -81,8 +81,8 @@ av_unused static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixel
PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
"movq %%mm4, 8(%2) \n\t"
"movq %%mm5, 8(%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
+ "add %%"FF_REG_a", %1 \n\t"
+ "add %%"FF_REG_a", %2 \n\t"
"movq (%1), %%mm0 \n\t"
"movq 1(%1), %%mm1 \n\t"
"movq (%1, %3), %%mm2 \n\t"
@@ -97,42 +97,42 @@ av_unused static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixel
PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
"movq %%mm4, 8(%2) \n\t"
"movq %%mm5, 8(%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
+ "add %%"FF_REG_a", %1 \n\t"
+ "add %%"FF_REG_a", %2 \n\t"
"subl $4, %0 \n\t"
"jnz 1b \n\t"
:"+g"(h), "+S"(pixels), "+D"(block)
:"r"((x86_reg)line_size)
- :REG_a, "memory");
+ :FF_REG_a, "memory");
}
av_unused static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
MOVQ_BFE(mm6);
__asm__ volatile(
- "lea (%3, %3), %%"REG_a" \n\t"
+ "lea (%3, %3), %%"FF_REG_a" \n\t"
"movq (%1), %%mm0 \n\t"
".p2align 3 \n\t"
"1: \n\t"
"movq (%1, %3), %%mm1 \n\t"
- "movq (%1, %%"REG_a"),%%mm2 \n\t"
+ "movq (%1, %%"FF_REG_a"),%%mm2\n\t"
PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5)
"movq %%mm4, (%2) \n\t"
"movq %%mm5, (%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
+ "add %%"FF_REG_a", %1 \n\t"
+ "add %%"FF_REG_a", %2 \n\t"
"movq (%1, %3), %%mm1 \n\t"
- "movq (%1, %%"REG_a"),%%mm0 \n\t"
+ "movq (%1, %%"FF_REG_a"),%%mm0\n\t"
PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5)
"movq %%mm4, (%2) \n\t"
"movq %%mm5, (%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
+ "add %%"FF_REG_a", %1 \n\t"
+ "add %%"FF_REG_a", %2 \n\t"
"subl $4, %0 \n\t"
"jnz 1b \n\t"
:"+g"(h), "+S"(pixels), "+D"(block)
:"r"((x86_reg)line_size)
- :REG_a, "memory");
+ :FF_REG_a, "memory");
}
av_unused static void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
@@ -166,12 +166,12 @@ av_unused static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels
{
MOVQ_BFE(mm6);
__asm__ volatile(
- "lea (%3, %3), %%"REG_a" \n\t"
+ "lea (%3, %3), %%"FF_REG_a" \n\t"
"movq (%1), %%mm0 \n\t"
".p2align 3 \n\t"
"1: \n\t"
"movq (%1, %3), %%mm1 \n\t"
- "movq (%1, %%"REG_a"), %%mm2 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm2 \n\t"
PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5)
"movq (%2), %%mm3 \n\t"
PAVGB_MMX(%%mm3, %%mm4, %%mm0, %%mm6)
@@ -179,11 +179,11 @@ av_unused static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels
PAVGB_MMX(%%mm3, %%mm5, %%mm1, %%mm6)
"movq %%mm0, (%2) \n\t"
"movq %%mm1, (%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
+ "add %%"FF_REG_a", %1 \n\t"
+ "add %%"FF_REG_a", %2 \n\t"
"movq (%1, %3), %%mm1 \n\t"
- "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm0 \n\t"
PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5)
"movq (%2), %%mm3 \n\t"
PAVGB_MMX(%%mm3, %%mm4, %%mm2, %%mm6)
@@ -191,12 +191,12 @@ av_unused static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels
PAVGB_MMX(%%mm3, %%mm5, %%mm1, %%mm6)
"movq %%mm2, (%2) \n\t"
"movq %%mm1, (%2, %3) \n\t"
- "add %%"REG_a", %1 \n\t"
- "add %%"REG_a", %2 \n\t"
+ "add %%"FF_REG_a", %1 \n\t"
+ "add %%"FF_REG_a", %2 \n\t"
"subl $4, %0 \n\t"
"jnz 1b \n\t"
:"+g"(h), "+S"(pixels), "+D"(block)
:"r"((x86_reg)line_size)
- :REG_a, "memory");
+ :FF_REG_a, "memory");
}
diff --git a/libavcodec/x86/me_cmp_init.c b/libavcodec/x86/me_cmp_init.c
index 49f50d0eed..dc3e6f8668 100644
--- a/libavcodec/x86/me_cmp_init.c
+++ b/libavcodec/x86/me_cmp_init.c
@@ -283,15 +283,15 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2,
__asm__ volatile (
".p2align 4 \n\t"
"1: \n\t"
- "movq (%1, %%"REG_a"), %%mm0 \n\t"
- "movq (%2, %%"REG_a"), %%mm2 \n\t"
- "movq (%2, %%"REG_a"), %%mm4 \n\t"
- "add %3, %%"REG_a" \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm0 \n\t"
+ "movq (%2, %%"FF_REG_a"), %%mm2 \n\t"
+ "movq (%2, %%"FF_REG_a"), %%mm4 \n\t"
+ "add %3, %%"FF_REG_a" \n\t"
"psubusb %%mm0, %%mm2 \n\t"
"psubusb %%mm4, %%mm0 \n\t"
- "movq (%1, %%"REG_a"), %%mm1 \n\t"
- "movq (%2, %%"REG_a"), %%mm3 \n\t"
- "movq (%2, %%"REG_a"), %%mm5 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm1 \n\t"
+ "movq (%2, %%"FF_REG_a"), %%mm3 \n\t"
+ "movq (%2, %%"FF_REG_a"), %%mm5 \n\t"
"psubusb %%mm1, %%mm3 \n\t"
"psubusb %%mm5, %%mm1 \n\t"
"por %%mm2, %%mm0 \n\t"
@@ -306,7 +306,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2,
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"paddw %%mm0, %%mm6 \n\t"
- "add %3, %%"REG_a" \n\t"
+ "add %3, %%"FF_REG_a" \n\t"
" js 1b \n\t"
: "+a" (len)
: "r" (blk1 - len), "r" (blk2 - len), "r" (stride));
@@ -319,18 +319,18 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
__asm__ volatile (
".p2align 4 \n\t"
"1: \n\t"
- "movq (%1, %%"REG_a"), %%mm0 \n\t"
- "movq (%2, %%"REG_a"), %%mm1 \n\t"
- "movq (%1, %%"REG_a"), %%mm2 \n\t"
- "movq (%2, %%"REG_a"), %%mm3 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm0 \n\t"
+ "movq (%2, %%"FF_REG_a"), %%mm1 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm2 \n\t"
+ "movq (%2, %%"FF_REG_a"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpckhbw %%mm7, %%mm2 \n\t"
"punpckhbw %%mm7, %%mm3 \n\t"
"paddw %%mm0, %%mm1 \n\t"
"paddw %%mm2, %%mm3 \n\t"
- "movq (%3, %%"REG_a"), %%mm4 \n\t"
- "movq (%3, %%"REG_a"), %%mm2 \n\t"
+ "movq (%3, %%"FF_REG_a"), %%mm4 \n\t"
+ "movq (%3, %%"FF_REG_a"), %%mm2 \n\t"
"paddw %%mm5, %%mm1 \n\t"
"paddw %%mm5, %%mm3 \n\t"
"psrlw $1, %%mm1 \n\t"
@@ -344,7 +344,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
"punpckhbw %%mm7, %%mm1 \n\t"
"paddw %%mm1, %%mm0 \n\t"
"paddw %%mm0, %%mm6 \n\t"
- "add %4, %%"REG_a" \n\t"
+ "add %4, %%"FF_REG_a" \n\t"
" js 1b \n\t"
: "+a" (len)
: "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
@@ -356,8 +356,8 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2,
{
x86_reg len = -stride * h;
__asm__ volatile (
- "movq (%1, %%"REG_a"), %%mm0 \n\t"
- "movq 1(%1, %%"REG_a"), %%mm2 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm0\n\t"
+ "movq 1(%1, %%"FF_REG_a"), %%mm2\n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
@@ -368,8 +368,8 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2,
"paddw %%mm3, %%mm1 \n\t"
".p2align 4 \n\t"
"1: \n\t"
- "movq (%2, %%"REG_a"), %%mm2 \n\t"
- "movq 1(%2, %%"REG_a"), %%mm4 \n\t"
+ "movq (%2, %%"FF_REG_a"), %%mm2\n\t"
+ "movq 1(%2, %%"FF_REG_a"), %%mm4\n\t"
"movq %%mm2, %%mm3 \n\t"
"movq %%mm4, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
@@ -383,8 +383,8 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2,
"paddw %%mm3, %%mm1 \n\t"
"paddw %%mm5, %%mm0 \n\t"
"paddw %%mm5, %%mm1 \n\t"
- "movq (%3, %%"REG_a"), %%mm4 \n\t"
- "movq (%3, %%"REG_a"), %%mm5 \n\t"
+ "movq (%3, %%"FF_REG_a"), %%mm4 \n\t"
+ "movq (%3, %%"FF_REG_a"), %%mm5 \n\t"
"psrlw $2, %%mm0 \n\t"
"psrlw $2, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
@@ -398,7 +398,7 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2,
"paddw %%mm4, %%mm6 \n\t"
"movq %%mm2, %%mm0 \n\t"
"movq %%mm3, %%mm1 \n\t"
- "add %4, %%"REG_a" \n\t"
+ "add %4, %%"FF_REG_a" \n\t"
" js 1b \n\t"
: "+a" (len)
: "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
diff --git a/libavcodec/x86/mpegvideo.c b/libavcodec/x86/mpegvideo.c
index 18113265ba..35a8264804 100644
--- a/libavcodec/x86/mpegvideo.c
+++ b/libavcodec/x86/mpegvideo.c
@@ -188,13 +188,13 @@ __asm__ volatile(
"movd %2, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
- "mov %3, %%"REG_a" \n\t"
+ "mov %3, %%"FF_REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
- "movq (%0, %%"REG_a"), %%mm0 \n\t"
- "movq 8(%0, %%"REG_a"), %%mm1 \n\t"
- "movq (%1, %%"REG_a"), %%mm4 \n\t"
- "movq 8(%1, %%"REG_a"), %%mm5 \n\t"
+ "movq (%0, %%"FF_REG_a"), %%mm0 \n\t"
+ "movq 8(%0, %%"FF_REG_a"), %%mm1\n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm4 \n\t"
+ "movq 8(%1, %%"FF_REG_a"), %%mm5\n\t"
"pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
"pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
"pxor %%mm2, %%mm2 \n\t"
@@ -209,8 +209,8 @@ __asm__ volatile(
"pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t" // FIXME slow
- "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
- "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw (%0, %%"FF_REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%"FF_REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
"psraw $3, %%mm0 \n\t"
"psraw $3, %%mm1 \n\t"
"psubw %%mm7, %%mm0 \n\t"
@@ -223,13 +223,13 @@ __asm__ volatile(
"psubw %%mm3, %%mm1 \n\t"
"pandn %%mm0, %%mm4 \n\t"
"pandn %%mm1, %%mm5 \n\t"
- "movq %%mm4, (%0, %%"REG_a") \n\t"
- "movq %%mm5, 8(%0, %%"REG_a") \n\t"
+ "movq %%mm4, (%0, %%"FF_REG_a") \n\t"
+ "movq %%mm5, 8(%0, %%"FF_REG_a")\n\t"
- "add $16, %%"REG_a" \n\t"
+ "add $16, %%"FF_REG_a" \n\t"
"js 1b \n\t"
::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
- : "%"REG_a, "memory"
+ : "%"FF_REG_a, "memory"
);
block[0]= block0;
}
@@ -251,13 +251,13 @@ __asm__ volatile(
"movd %2, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
- "mov %3, %%"REG_a" \n\t"
+ "mov %3, %%"FF_REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
- "movq (%0, %%"REG_a"), %%mm0 \n\t"
- "movq 8(%0, %%"REG_a"), %%mm1 \n\t"
- "movq (%1, %%"REG_a"), %%mm4 \n\t"
- "movq 8(%1, %%"REG_a"), %%mm5 \n\t"
+ "movq (%0, %%"FF_REG_a"), %%mm0 \n\t"
+ "movq 8(%0, %%"FF_REG_a"), %%mm1\n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm4 \n\t"
+ "movq 8(%1, %%"FF_REG_a"), %%mm5\n\t"
"pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
"pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
"pxor %%mm2, %%mm2 \n\t"
@@ -276,8 +276,8 @@ __asm__ volatile(
"pmullw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t" // FIXME slow
- "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
- "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw (%0, %%"FF_REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%"FF_REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
"psraw $4, %%mm0 \n\t"
"psraw $4, %%mm1 \n\t"
"psubw %%mm7, %%mm0 \n\t"
@@ -290,13 +290,13 @@ __asm__ volatile(
"psubw %%mm3, %%mm1 \n\t"
"pandn %%mm0, %%mm4 \n\t"
"pandn %%mm1, %%mm5 \n\t"
- "movq %%mm4, (%0, %%"REG_a") \n\t"
- "movq %%mm5, 8(%0, %%"REG_a") \n\t"
+ "movq %%mm4, (%0, %%"FF_REG_a") \n\t"
+ "movq %%mm5, 8(%0, %%"FF_REG_a")\n\t"
- "add $16, %%"REG_a" \n\t"
+ "add $16, %%"FF_REG_a" \n\t"
"js 1b \n\t"
::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
- : "%"REG_a, "memory"
+ : "%"FF_REG_a, "memory"
);
}
@@ -326,13 +326,13 @@ __asm__ volatile(
"movd %2, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
- "mov %3, %%"REG_a" \n\t"
+ "mov %3, %%"FF_REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
- "movq (%0, %%"REG_a"), %%mm0 \n\t"
- "movq 8(%0, %%"REG_a"), %%mm1 \n\t"
- "movq (%1, %%"REG_a"), %%mm4 \n\t"
- "movq 8(%1, %%"REG_a"), %%mm5 \n\t"
+ "movq (%0, %%"FF_REG_a"), %%mm0 \n\t"
+ "movq 8(%0, %%"FF_REG_a"), %%mm1\n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm4 \n\t"
+ "movq 8(%1, %%"FF_REG_a"), %%mm5\n\t"
"pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
"pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
"pxor %%mm2, %%mm2 \n\t"
@@ -347,8 +347,8 @@ __asm__ volatile(
"pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t" // FIXME slow
- "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
- "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw (%0, %%"FF_REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%"FF_REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
"psraw $4, %%mm0 \n\t"
"psraw $4, %%mm1 \n\t"
"pxor %%mm2, %%mm0 \n\t"
@@ -357,13 +357,13 @@ __asm__ volatile(
"psubw %%mm3, %%mm1 \n\t"
"pandn %%mm0, %%mm4 \n\t"
"pandn %%mm1, %%mm5 \n\t"
- "movq %%mm4, (%0, %%"REG_a") \n\t"
- "movq %%mm5, 8(%0, %%"REG_a") \n\t"
+ "movq %%mm4, (%0, %%"FF_REG_a") \n\t"
+ "movq %%mm5, 8(%0, %%"FF_REG_a")\n\t"
- "add $16, %%"REG_a" \n\t"
+ "add $16, %%"FF_REG_a" \n\t"
"jng 1b \n\t"
::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
- : "%"REG_a, "memory"
+ : "%"FF_REG_a, "memory"
);
block[0]= block0;
//Note, we do not do mismatch control for intra as errors cannot accumulate
@@ -390,13 +390,13 @@ __asm__ volatile(
"movd %2, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
- "mov %3, %%"REG_a" \n\t"
+ "mov %3, %%"FF_REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
- "movq (%0, %%"REG_a"), %%mm0 \n\t"
- "movq 8(%0, %%"REG_a"), %%mm1 \n\t"
- "movq (%1, %%"REG_a"), %%mm4 \n\t"
- "movq 8(%1, %%"REG_a"), %%mm5 \n\t"
+ "movq (%0, %%"FF_REG_a"), %%mm0 \n\t"
+ "movq 8(%0, %%"FF_REG_a"), %%mm1\n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm4 \n\t"
+ "movq 8(%1, %%"FF_REG_a"), %%mm5\n\t"
"pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
"pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
"pxor %%mm2, %%mm2 \n\t"
@@ -415,8 +415,8 @@ __asm__ volatile(
"paddw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t" // FIXME slow
- "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
- "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw (%0, %%"FF_REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%"FF_REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
"psrlw $5, %%mm0 \n\t"
"psrlw $5, %%mm1 \n\t"
"pxor %%mm2, %%mm0 \n\t"
@@ -427,10 +427,10 @@ __asm__ volatile(
"pandn %%mm1, %%mm5 \n\t"
"pxor %%mm4, %%mm7 \n\t"
"pxor %%mm5, %%mm7 \n\t"
- "movq %%mm4, (%0, %%"REG_a") \n\t"
- "movq %%mm5, 8(%0, %%"REG_a") \n\t"
+ "movq %%mm4, (%0, %%"FF_REG_a") \n\t"
+ "movq %%mm5, 8(%0, %%"FF_REG_a")\n\t"
- "add $16, %%"REG_a" \n\t"
+ "add $16, %%"FF_REG_a" \n\t"
"jng 1b \n\t"
"movd 124(%0, %3), %%mm0 \n\t"
"movq %%mm7, %%mm6 \n\t"
@@ -445,7 +445,7 @@ __asm__ volatile(
"movd %%mm0, 124(%0, %3) \n\t"
::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "r" (-2*nCoeffs)
- : "%"REG_a, "memory"
+ : "%"FF_REG_a, "memory"
);
}
diff --git a/libavcodec/x86/mpegvideoenc_template.c b/libavcodec/x86/mpegvideoenc_template.c
index da76459cd6..b2512744ca 100644
--- a/libavcodec/x86/mpegvideoenc_template.c
+++ b/libavcodec/x86/mpegvideoenc_template.c
@@ -150,32 +150,32 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
if((s->out_format == FMT_H263 || s->out_format == FMT_H261) && s->mpeg_quant==0){
__asm__ volatile(
- "movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1
+ "movd %%"FF_REG_a", "MM"3 \n\t" // last_non_zero_p1
SPREADW(MM"3")
"pxor "MM"7, "MM"7 \n\t" // 0
"pxor "MM"4, "MM"4 \n\t" // 0
MOVQ" (%2), "MM"5 \n\t" // qmat[0]
"pxor "MM"6, "MM"6 \n\t"
"psubw (%3), "MM"6 \n\t" // -bias[0]
- "mov $-128, %%"REG_a" \n\t"
+ "mov $-128, %%"FF_REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
- MOVQ" (%1, %%"REG_a"), "MM"0 \n\t" // block[i]
+ MOVQ" (%1, %%"FF_REG_a"), "MM"0 \n\t" // block[i]
SAVE_SIGN(MM"1", MM"0") // ABS(block[i])
"psubusw "MM"6, "MM"0 \n\t" // ABS(block[i]) + bias[0]
"pmulhw "MM"5, "MM"0 \n\t" // (ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16
"por "MM"0, "MM"4 \n\t"
RESTORE_SIGN(MM"1", MM"0") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
- MOVQ" "MM"0, (%5, %%"REG_a") \n\t"
+ MOVQ" "MM"0, (%5, %%"FF_REG_a") \n\t"
"pcmpeqw "MM"7, "MM"0 \n\t" // out==0 ? 0xFF : 0x00
- MOVQ" (%4, %%"REG_a"), "MM"1 \n\t"
- MOVQ" "MM"7, (%1, %%"REG_a") \n\t" // 0
+ MOVQ" (%4, %%"FF_REG_a"), "MM"1 \n\t"
+ MOVQ" "MM"7, (%1, %%"FF_REG_a") \n\t" // 0
"pandn "MM"1, "MM"0 \n\t"
PMAXW(MM"0", MM"3")
- "add $"MMREG_WIDTH", %%"REG_a" \n\t"
+ "add $"MMREG_WIDTH", %%"FF_REG_a" \n\t"
" js 1b \n\t"
PMAX(MM"3", MM"0")
- "movd "MM"3, %%"REG_a" \n\t"
+ "movd "MM"3, %%"FF_REG_a" \n\t"
"movzbl %%al, %%eax \n\t" // last_non_zero_p1
: "+a" (last_non_zero_p1)
: "r" (block+64), "r" (qmat), "r" (bias),
@@ -185,31 +185,31 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
);
}else{ // FMT_H263
__asm__ volatile(
- "movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1
+ "movd %%"FF_REG_a", "MM"3 \n\t" // last_non_zero_p1
SPREADW(MM"3")
"pxor "MM"7, "MM"7 \n\t" // 0
"pxor "MM"4, "MM"4 \n\t" // 0
- "mov $-128, %%"REG_a" \n\t"
+ "mov $-128, %%"FF_REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
- MOVQ" (%1, %%"REG_a"), "MM"0 \n\t" // block[i]
+ MOVQ" (%1, %%"FF_REG_a"), "MM"0 \n\t" // block[i]
SAVE_SIGN(MM"1", MM"0") // ABS(block[i])
- MOVQ" (%3, %%"REG_a"), "MM"6 \n\t" // bias[0]
+ MOVQ" (%3, %%"FF_REG_a"), "MM"6 \n\t" // bias[0]
"paddusw "MM"6, "MM"0 \n\t" // ABS(block[i]) + bias[0]
- MOVQ" (%2, %%"REG_a"), "MM"5 \n\t" // qmat[i]
+ MOVQ" (%2, %%"FF_REG_a"), "MM"5 \n\t" // qmat[i]
"pmulhw "MM"5, "MM"0 \n\t" // (ABS(block[i])*qmat[0] + bias[0]*qmat[0])>>16
"por "MM"0, "MM"4 \n\t"
RESTORE_SIGN(MM"1", MM"0") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
- MOVQ" "MM"0, (%5, %%"REG_a") \n\t"
+ MOVQ" "MM"0, (%5, %%"FF_REG_a") \n\t"
"pcmpeqw "MM"7, "MM"0 \n\t" // out==0 ? 0xFF : 0x00
- MOVQ" (%4, %%"REG_a"), "MM"1 \n\t"
- MOVQ" "MM"7, (%1, %%"REG_a") \n\t" // 0
+ MOVQ" (%4, %%"FF_REG_a"), "MM"1 \n\t"
+ MOVQ" "MM"7, (%1, %%"FF_REG_a") \n\t" // 0
"pandn "MM"1, "MM"0 \n\t"
PMAXW(MM"0", MM"3")
- "add $"MMREG_WIDTH", %%"REG_a" \n\t"
+ "add $"MMREG_WIDTH", %%"FF_REG_a" \n\t"
" js 1b \n\t"
PMAX(MM"3", MM"0")
- "movd "MM"3, %%"REG_a" \n\t"
+ "movd "MM"3, %%"FF_REG_a" \n\t"
"movzbl %%al, %%eax \n\t" // last_non_zero_p1
: "+a" (last_non_zero_p1)
: "r" (block+64), "r" (qmat+64), "r" (bias+64),
diff --git a/libavcodec/x86/rnd_template.c b/libavcodec/x86/rnd_template.c
index ddca4eb590..09946bd23f 100644
--- a/libavcodec/x86/rnd_template.c
+++ b/libavcodec/x86/rnd_template.c
@@ -46,12 +46,12 @@ av_unused STATIC void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixel
"punpckhbw %%mm7, %%mm5 \n\t"
"paddusw %%mm0, %%mm4 \n\t"
"paddusw %%mm1, %%mm5 \n\t"
- "xor %%"REG_a", %%"REG_a" \n\t"
+ "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
"add %3, %1 \n\t"
".p2align 3 \n\t"
"1: \n\t"
- "movq (%1, %%"REG_a"), %%mm0 \n\t"
- "movq 1(%1, %%"REG_a"), %%mm2 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm0 \n\t"
+ "movq 1(%1, %%"FF_REG_a"), %%mm2 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
@@ -67,11 +67,11 @@ av_unused STATIC void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixel
"psrlw $2, %%mm4 \n\t"
"psrlw $2, %%mm5 \n\t"
"packuswb %%mm5, %%mm4 \n\t"
- "movq %%mm4, (%2, %%"REG_a") \n\t"
- "add %3, %%"REG_a" \n\t"
+ "movq %%mm4, (%2, %%"FF_REG_a") \n\t"
+ "add %3, %%"FF_REG_a" \n\t"
- "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
- "movq 1(%1, %%"REG_a"), %%mm4 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
+ "movq 1(%1, %%"FF_REG_a"), %%mm4 \n\t"
"movq %%mm2, %%mm3 \n\t"
"movq %%mm4, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
@@ -87,14 +87,14 @@ av_unused STATIC void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixel
"psrlw $2, %%mm0 \n\t"
"psrlw $2, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
- "movq %%mm0, (%2, %%"REG_a") \n\t"
- "add %3, %%"REG_a" \n\t"
+ "movq %%mm0, (%2, %%"FF_REG_a") \n\t"
+ "add %3, %%"FF_REG_a" \n\t"
"subl $2, %0 \n\t"
"jnz 1b \n\t"
:"+g"(h), "+S"(pixels)
:"D"(block), "r"((x86_reg)line_size)
- :REG_a, "memory");
+ :FF_REG_a, "memory");
}
// avg_pixels
@@ -115,12 +115,12 @@ av_unused STATIC void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixel
"punpckhbw %%mm7, %%mm5 \n\t"
"paddusw %%mm0, %%mm4 \n\t"
"paddusw %%mm1, %%mm5 \n\t"
- "xor %%"REG_a", %%"REG_a" \n\t"
+ "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
"add %3, %1 \n\t"
".p2align 3 \n\t"
"1: \n\t"
- "movq (%1, %%"REG_a"), %%mm0 \n\t"
- "movq 1(%1, %%"REG_a"), %%mm2 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm0 \n\t"
+ "movq 1(%1, %%"FF_REG_a"), %%mm2 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
@@ -135,16 +135,16 @@ av_unused STATIC void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixel
"paddusw %%mm1, %%mm5 \n\t"
"psrlw $2, %%mm4 \n\t"
"psrlw $2, %%mm5 \n\t"
- "movq (%2, %%"REG_a"), %%mm3 \n\t"
+ "movq (%2, %%"FF_REG_a"), %%mm3 \n\t"
"packuswb %%mm5, %%mm4 \n\t"
"pcmpeqd %%mm2, %%mm2 \n\t"
"paddb %%mm2, %%mm2 \n\t"
PAVGB_MMX(%%mm3, %%mm4, %%mm5, %%mm2)
- "movq %%mm5, (%2, %%"REG_a") \n\t"
- "add %3, %%"REG_a" \n\t"
+ "movq %%mm5, (%2, %%"FF_REG_a") \n\t"
+ "add %3, %%"FF_REG_a" \n\t"
- "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
- "movq 1(%1, %%"REG_a"), %%mm4 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
+ "movq 1(%1, %%"FF_REG_a"), %%mm4 \n\t"
"movq %%mm2, %%mm3 \n\t"
"movq %%mm4, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
@@ -159,17 +159,17 @@ av_unused STATIC void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixel
"paddusw %%mm5, %%mm1 \n\t"
"psrlw $2, %%mm0 \n\t"
"psrlw $2, %%mm1 \n\t"
- "movq (%2, %%"REG_a"), %%mm3 \n\t"
+ "movq (%2, %%"FF_REG_a"), %%mm3 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
"pcmpeqd %%mm2, %%mm2 \n\t"
"paddb %%mm2, %%mm2 \n\t"
PAVGB_MMX(%%mm3, %%mm0, %%mm1, %%mm2)
- "movq %%mm1, (%2, %%"REG_a") \n\t"
- "add %3, %%"REG_a" \n\t"
+ "movq %%mm1, (%2, %%"FF_REG_a") \n\t"
+ "add %3, %%"FF_REG_a" \n\t"
"subl $2, %0 \n\t"
"jnz 1b \n\t"
:"+g"(h), "+S"(pixels)
:"D"(block), "r"((x86_reg)line_size)
- :REG_a, "memory");
+ :FF_REG_a, "memory");
}
diff --git a/libavcodec/x86/snowdsp.c b/libavcodec/x86/snowdsp.c
index e2ad511d0a..218e6864db 100644
--- a/libavcodec/x86/snowdsp.c
+++ b/libavcodec/x86/snowdsp.c
@@ -390,10 +390,10 @@ static void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, IDWTELEM *temp, int w
#if HAVE_7REGS
#define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\
- ""op" ("r",%%"REG_d"), %%"t0" \n\t"\
- ""op" 16("r",%%"REG_d"), %%"t1" \n\t"\
- ""op" 32("r",%%"REG_d"), %%"t2" \n\t"\
- ""op" 48("r",%%"REG_d"), %%"t3" \n\t"
+ ""op" ("r",%%"FF_REG_d"), %%"t0" \n\t"\
+ ""op" 16("r",%%"FF_REG_d"), %%"t1" \n\t"\
+ ""op" 32("r",%%"FF_REG_d"), %%"t2" \n\t"\
+ ""op" 48("r",%%"FF_REG_d"), %%"t3" \n\t"
#define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\
snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3)
@@ -408,10 +408,10 @@ static void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, IDWTELEM *temp, int w
"psubw %%"s3", %%"t3" \n\t"
#define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\
- "movdqa %%"s0", ("w",%%"REG_d") \n\t"\
- "movdqa %%"s1", 16("w",%%"REG_d") \n\t"\
- "movdqa %%"s2", 32("w",%%"REG_d") \n\t"\
- "movdqa %%"s3", 48("w",%%"REG_d") \n\t"
+ "movdqa %%"s0", ("w",%%"FF_REG_d") \n\t"\
+ "movdqa %%"s1", 16("w",%%"FF_REG_d") \n\t"\
+ "movdqa %%"s2", 32("w",%%"FF_REG_d") \n\t"\
+ "movdqa %%"s3", 48("w",%%"FF_REG_d") \n\t"
#define snow_vertical_compose_sra(n,t0,t1,t2,t3)\
"psraw $"n", %%"t0" \n\t"\
@@ -477,14 +477,14 @@ static void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELE
"psrlw $13, %%xmm5 \n\t"
"paddw %%xmm7, %%xmm5 \n\t"
snow_vertical_compose_r2r_add("xmm5","xmm5","xmm5","xmm5","xmm0","xmm2","xmm4","xmm6")
- "movq (%2,%%"REG_d"), %%xmm1 \n\t"
- "movq 8(%2,%%"REG_d"), %%xmm3 \n\t"
+ "movq (%2,%%"FF_REG_d"), %%xmm1 \n\t"
+ "movq 8(%2,%%"FF_REG_d"), %%xmm3 \n\t"
"paddw %%xmm7, %%xmm1 \n\t"
"paddw %%xmm7, %%xmm3 \n\t"
"pavgw %%xmm1, %%xmm0 \n\t"
"pavgw %%xmm3, %%xmm2 \n\t"
- "movq 16(%2,%%"REG_d"), %%xmm1 \n\t"
- "movq 24(%2,%%"REG_d"), %%xmm3 \n\t"
+ "movq 16(%2,%%"FF_REG_d"), %%xmm1 \n\t"
+ "movq 24(%2,%%"FF_REG_d"), %%xmm3 \n\t"
"paddw %%xmm7, %%xmm1 \n\t"
"paddw %%xmm7, %%xmm3 \n\t"
"pavgw %%xmm1, %%xmm4 \n\t"
@@ -504,17 +504,17 @@ static void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELE
snow_vertical_compose_sse2_store("%2","xmm0","xmm2","xmm4","xmm6")
"2: \n\t"
- "sub $64, %%"REG_d" \n\t"
+ "sub $64, %%"FF_REG_d" \n\t"
"jge 1b \n\t"
:"+d"(i)
:"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5));
}
#define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\
- ""op" ("r",%%"REG_d"), %%"t0" \n\t"\
- ""op" 8("r",%%"REG_d"), %%"t1" \n\t"\
- ""op" 16("r",%%"REG_d"), %%"t2" \n\t"\
- ""op" 24("r",%%"REG_d"), %%"t3" \n\t"
+ ""op" ("r",%%"FF_REG_d"), %%"t0" \n\t"\
+ ""op" 8("r",%%"FF_REG_d"), %%"t1" \n\t"\
+ ""op" 16("r",%%"FF_REG_d"), %%"t2" \n\t"\
+ ""op" 24("r",%%"FF_REG_d"), %%"t3" \n\t"
#define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\
snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3)
@@ -523,10 +523,10 @@ static void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELE
snow_vertical_compose_mmx_load_add("paddw",r,t0,t1,t2,t3)
#define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\
- "movq %%"s0", ("w",%%"REG_d") \n\t"\
- "movq %%"s1", 8("w",%%"REG_d") \n\t"\
- "movq %%"s2", 16("w",%%"REG_d") \n\t"\
- "movq %%"s3", 24("w",%%"REG_d") \n\t"
+ "movq %%"s0", ("w",%%"FF_REG_d") \n\t"\
+ "movq %%"s1", 8("w",%%"FF_REG_d") \n\t"\
+ "movq %%"s2", 16("w",%%"FF_REG_d") \n\t"\
+ "movq %%"s3", 24("w",%%"FF_REG_d") \n\t"
#define snow_vertical_compose_mmx_move(s0,s1,s2,s3,t0,t1,t2,t3)\
"movq %%"s0", %%"t0" \n\t"\
@@ -571,14 +571,14 @@ static void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM
"psrlw $13, %%mm5 \n\t"
"paddw %%mm7, %%mm5 \n\t"
snow_vertical_compose_r2r_add("mm5","mm5","mm5","mm5","mm0","mm2","mm4","mm6")
- "movq (%2,%%"REG_d"), %%mm1 \n\t"
- "movq 8(%2,%%"REG_d"), %%mm3 \n\t"
+ "movq (%2,%%"FF_REG_d"), %%mm1 \n\t"
+ "movq 8(%2,%%"FF_REG_d"), %%mm3 \n\t"
"paddw %%mm7, %%mm1 \n\t"
"paddw %%mm7, %%mm3 \n\t"
"pavgw %%mm1, %%mm0 \n\t"
"pavgw %%mm3, %%mm2 \n\t"
- "movq 16(%2,%%"REG_d"), %%mm1 \n\t"
- "movq 24(%2,%%"REG_d"), %%mm3 \n\t"
+ "movq 16(%2,%%"FF_REG_d"), %%mm1 \n\t"
+ "movq 24(%2,%%"FF_REG_d"), %%mm3 \n\t"
"paddw %%mm7, %%mm1 \n\t"
"paddw %%mm7, %%mm3 \n\t"
"pavgw %%mm1, %%mm4 \n\t"
@@ -598,7 +598,7 @@ static void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM
snow_vertical_compose_mmx_store("%2","mm0","mm2","mm4","mm6")
"2: \n\t"
- "sub $32, %%"REG_d" \n\t"
+ "sub $32, %%"FF_REG_d" \n\t"
"jge 1b \n\t"
:"+d"(i)
:"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5));
@@ -610,39 +610,39 @@ static void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM
IDWTELEM * * dst_array = sb->line + src_y;\
x86_reg tmp;\
__asm__ volatile(\
- "mov %7, %%"REG_c" \n\t"\
+ "mov %7, %%"FF_REG_c" \n\t"\
"mov %6, %2 \n\t"\
- "mov %4, %%"REG_S" \n\t"\
+ "mov %4, %%"FF_REG_S" \n\t"\
"pxor %%xmm7, %%xmm7 \n\t" /* 0 */\
"pcmpeqd %%xmm3, %%xmm3 \n\t"\
"psllw $15, %%xmm3 \n\t"\
"psrlw $12, %%xmm3 \n\t" /* FRAC_BITS >> 1 */\
"1: \n\t"\
- "mov %1, %%"REG_D" \n\t"\
- "mov (%%"REG_D"), %%"REG_D" \n\t"\
- "add %3, %%"REG_D" \n\t"
+ "mov %1, %%"FF_REG_D" \n\t"\
+ "mov (%%"FF_REG_D"), %%"FF_REG_D" \n\t"\
+ "add %3, %%"FF_REG_D" \n\t"
#define snow_inner_add_yblock_sse2_start_8(out_reg1, out_reg2, ptr_offset, s_offset)\
- "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
- "movq (%%"REG_d"), %%"out_reg1" \n\t"\
- "movq (%%"REG_d", %%"REG_c"), %%"out_reg2" \n\t"\
+ "mov "FF_PTR_SIZE"*"ptr_offset"(%%"FF_REG_a"), %%"FF_REG_d"; \n\t"\
+ "movq (%%"FF_REG_d"), %%"out_reg1" \n\t"\
+ "movq (%%"FF_REG_d", %%"FF_REG_c"), %%"out_reg2" \n\t"\
"punpcklbw %%xmm7, %%"out_reg1" \n\t"\
"punpcklbw %%xmm7, %%"out_reg2" \n\t"\
- "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
- "movq "s_offset"+16(%%"REG_S"), %%xmm4 \n\t"\
+ "movq "s_offset"(%%"FF_REG_S"), %%xmm0 \n\t"\
+ "movq "s_offset"+16(%%"FF_REG_S"), %%xmm4 \n\t"\
"punpcklbw %%xmm7, %%xmm0 \n\t"\
"punpcklbw %%xmm7, %%xmm4 \n\t"\
"pmullw %%xmm0, %%"out_reg1" \n\t"\
"pmullw %%xmm4, %%"out_reg2" \n\t"
#define snow_inner_add_yblock_sse2_start_16(out_reg1, out_reg2, ptr_offset, s_offset)\
- "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
- "movq (%%"REG_d"), %%"out_reg1" \n\t"\
- "movq 8(%%"REG_d"), %%"out_reg2" \n\t"\
+ "mov "FF_PTR_SIZE"*"ptr_offset"(%%"FF_REG_a"), %%"FF_REG_d"; \n\t"\
+ "movq (%%"FF_REG_d"), %%"out_reg1" \n\t"\
+ "movq 8(%%"FF_REG_d"), %%"out_reg2" \n\t"\
"punpcklbw %%xmm7, %%"out_reg1" \n\t"\
"punpcklbw %%xmm7, %%"out_reg2" \n\t"\
- "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
- "movq "s_offset"+8(%%"REG_S"), %%xmm4 \n\t"\
+ "movq "s_offset"(%%"FF_REG_S"), %%xmm0 \n\t"\
+ "movq "s_offset"+8(%%"FF_REG_S"), %%xmm4 \n\t"\
"punpcklbw %%xmm7, %%xmm0 \n\t"\
"punpcklbw %%xmm7, %%xmm4 \n\t"\
"pmullw %%xmm0, %%"out_reg1" \n\t"\
@@ -659,12 +659,12 @@ static void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM
"paddusw %%xmm6, %%xmm5 \n\t"
#define snow_inner_add_yblock_sse2_end_common1\
- "add $32, %%"REG_S" \n\t"\
- "add %%"REG_c", %0 \n\t"\
- "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
- "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
- "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
- "add %%"REG_c", (%%"REG_a") \n\t"
+ "add $32, %%"FF_REG_S" \n\t"\
+ "add %%"FF_REG_c", %0 \n\t"\
+ "add %%"FF_REG_c", "FF_PTR_SIZE"*3(%%"FF_REG_a"); \n\t"\
+ "add %%"FF_REG_c", "FF_PTR_SIZE"*2(%%"FF_REG_a"); \n\t"\
+ "add %%"FF_REG_c", "FF_PTR_SIZE"*1(%%"FF_REG_a"); \n\t"\
+ "add %%"FF_REG_c", (%%"FF_REG_a") \n\t"
#define snow_inner_add_yblock_sse2_end_common2\
"jnz 1b \n\t"\
@@ -672,18 +672,18 @@ static void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM
:\
"rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\
XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", )\
- "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");
+ "%"FF_REG_c"","%"FF_REG_S"","%"FF_REG_D"","%"FF_REG_d"");
#define snow_inner_add_yblock_sse2_end_8\
- "sal $1, %%"REG_c" \n\t"\
- "add"OPSIZE" $"PTR_SIZE"*2, %1 \n\t"\
+ "sal $1, %%"FF_REG_c" \n\t"\
+ "add"FF_OPSIZE" $"FF_PTR_SIZE"*2, %1 \n\t"\
snow_inner_add_yblock_sse2_end_common1\
- "sar $1, %%"REG_c" \n\t"\
+ "sar $1, %%"FF_REG_c" \n\t"\
"sub $2, %2 \n\t"\
snow_inner_add_yblock_sse2_end_common2
#define snow_inner_add_yblock_sse2_end_16\
- "add"OPSIZE" $"PTR_SIZE"*1, %1 \n\t"\
+ "add"FF_OPSIZE" $"FF_PTR_SIZE"*1, %1 \n\t"\
snow_inner_add_yblock_sse2_end_common1\
"dec %2 \n\t"\
snow_inner_add_yblock_sse2_end_common2
@@ -696,28 +696,28 @@ snow_inner_add_yblock_sse2_accum_8("2", "8")
snow_inner_add_yblock_sse2_accum_8("1", "128")
snow_inner_add_yblock_sse2_accum_8("0", "136")
- "mov %0, %%"REG_d" \n\t"
- "movdqa (%%"REG_D"), %%xmm0 \n\t"
+ "mov %0, %%"FF_REG_d" \n\t"
+ "movdqa (%%"FF_REG_D"), %%xmm0 \n\t"
"movdqa %%xmm1, %%xmm2 \n\t"
"punpckhwd %%xmm7, %%xmm1 \n\t"
"punpcklwd %%xmm7, %%xmm2 \n\t"
"paddd %%xmm2, %%xmm0 \n\t"
- "movdqa 16(%%"REG_D"), %%xmm2 \n\t"
+ "movdqa 16(%%"FF_REG_D"), %%xmm2\n\t"
"paddd %%xmm1, %%xmm2 \n\t"
"paddd %%xmm3, %%xmm0 \n\t"
"paddd %%xmm3, %%xmm2 \n\t"
- "mov %1, %%"REG_D" \n\t"
- "mov "PTR_SIZE"(%%"REG_D"), %%"REG_D";\n\t"
- "add %3, %%"REG_D" \n\t"
+ "mov %1, %%"FF_REG_D" \n\t"
+ "mov "FF_PTR_SIZE"(%%"FF_REG_D"), %%"FF_REG_D"; \n\t"
+ "add %3, %%"FF_REG_D" \n\t"
- "movdqa (%%"REG_D"), %%xmm4 \n\t"
+ "movdqa (%%"FF_REG_D"), %%xmm4 \n\t"
"movdqa %%xmm5, %%xmm6 \n\t"
"punpckhwd %%xmm7, %%xmm5 \n\t"
"punpcklwd %%xmm7, %%xmm6 \n\t"
"paddd %%xmm6, %%xmm4 \n\t"
- "movdqa 16(%%"REG_D"), %%xmm6 \n\t"
+ "movdqa 16(%%"FF_REG_D"), %%xmm6\n\t"
"paddd %%xmm5, %%xmm6 \n\t"
"paddd %%xmm3, %%xmm4 \n\t"
"paddd %%xmm3, %%xmm6 \n\t"
@@ -726,13 +726,13 @@ snow_inner_add_yblock_sse2_accum_8("0", "136")
"psrad $8, %%xmm2 \n\t" /* FRAC_BITS. */
"packssdw %%xmm2, %%xmm0 \n\t"
"packuswb %%xmm7, %%xmm0 \n\t"
- "movq %%xmm0, (%%"REG_d") \n\t"
+ "movq %%xmm0, (%%"FF_REG_d") \n\t"
"psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */
"psrad $8, %%xmm6 \n\t" /* FRAC_BITS. */
"packssdw %%xmm6, %%xmm4 \n\t"
"packuswb %%xmm7, %%xmm4 \n\t"
- "movq %%xmm4, (%%"REG_d",%%"REG_c");\n\t"
+ "movq %%xmm4, (%%"FF_REG_d",%%"FF_REG_c"); \n\t"
snow_inner_add_yblock_sse2_end_8
}
@@ -744,18 +744,18 @@ snow_inner_add_yblock_sse2_accum_16("2", "16")
snow_inner_add_yblock_sse2_accum_16("1", "512")
snow_inner_add_yblock_sse2_accum_16("0", "528")
- "mov %0, %%"REG_d" \n\t"
+ "mov %0, %%"FF_REG_d" \n\t"
"psrlw $4, %%xmm1 \n\t"
"psrlw $4, %%xmm5 \n\t"
- "paddw (%%"REG_D"), %%xmm1 \n\t"
- "paddw 16(%%"REG_D"), %%xmm5 \n\t"
+ "paddw (%%"FF_REG_D"), %%xmm1 \n\t"
+ "paddw 16(%%"FF_REG_D"), %%xmm5 \n\t"
"paddw %%xmm3, %%xmm1 \n\t"
"paddw %%xmm3, %%xmm5 \n\t"
"psraw $4, %%xmm1 \n\t" /* FRAC_BITS. */
"psraw $4, %%xmm5 \n\t" /* FRAC_BITS. */
"packuswb %%xmm5, %%xmm1 \n\t"
- "movdqu %%xmm1, (%%"REG_d") \n\t"
+ "movdqu %%xmm1, (%%"FF_REG_d") \n\t"
snow_inner_add_yblock_sse2_end_16
}
@@ -764,30 +764,30 @@ snow_inner_add_yblock_sse2_end_16
IDWTELEM * * dst_array = sb->line + src_y;\
x86_reg tmp;\
__asm__ volatile(\
- "mov %7, %%"REG_c" \n\t"\
+ "mov %7, %%"FF_REG_c" \n\t"\
"mov %6, %2 \n\t"\
- "mov %4, %%"REG_S" \n\t"\
+ "mov %4, %%"FF_REG_S" \n\t"\
"pxor %%mm7, %%mm7 \n\t" /* 0 */\
"pcmpeqd %%mm3, %%mm3 \n\t"\
"psllw $15, %%mm3 \n\t"\
"psrlw $12, %%mm3 \n\t" /* FRAC_BITS >> 1 */\
"1: \n\t"\
- "mov %1, %%"REG_D" \n\t"\
- "mov (%%"REG_D"), %%"REG_D" \n\t"\
- "add %3, %%"REG_D" \n\t"
+ "mov %1, %%"FF_REG_D" \n\t"\
+ "mov (%%"FF_REG_D"), %%"FF_REG_D" \n\t"\
+ "add %3, %%"FF_REG_D" \n\t"
#define snow_inner_add_yblock_mmx_start(out_reg1, out_reg2, ptr_offset, s_offset, d_offset)\
- "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
- "movd "d_offset"(%%"REG_d"), %%"out_reg1" \n\t"\
- "movd "d_offset"+4(%%"REG_d"), %%"out_reg2" \n\t"\
+ "mov "FF_PTR_SIZE"*"ptr_offset"(%%"FF_REG_a"), %%"FF_REG_d"; \n\t"\
+ "movd "d_offset"(%%"FF_REG_d"), %%"out_reg1" \n\t"\
+ "movd "d_offset"+4(%%"FF_REG_d"), %%"out_reg2" \n\t"\
"punpcklbw %%mm7, %%"out_reg1" \n\t"\
"punpcklbw %%mm7, %%"out_reg2" \n\t"\
- "movd "s_offset"(%%"REG_S"), %%mm0 \n\t"\
- "movd "s_offset"+4(%%"REG_S"), %%mm4 \n\t"\
+ "movd "s_offset"(%%"FF_REG_S"), %%mm0 \n\t"\
+ "movd "s_offset"+4(%%"FF_REG_S"), %%mm4 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
- "pmullw %%mm0, %%"out_reg1" \n\t"\
- "pmullw %%mm4, %%"out_reg2" \n\t"
+ "pmullw %%mm0, %%"out_reg1" \n\t"\
+ "pmullw %%mm4, %%"out_reg2" \n\t"
#define snow_inner_add_yblock_mmx_accum(ptr_offset, s_offset, d_offset) \
snow_inner_add_yblock_mmx_start("mm2", "mm6", ptr_offset, s_offset, d_offset)\
@@ -795,32 +795,32 @@ snow_inner_add_yblock_sse2_end_16
"paddusw %%mm6, %%mm5 \n\t"
#define snow_inner_add_yblock_mmx_mix(read_offset, write_offset)\
- "mov %0, %%"REG_d" \n\t"\
+ "mov %0, %%"FF_REG_d" \n\t"\
"psrlw $4, %%mm1 \n\t"\
"psrlw $4, %%mm5 \n\t"\
- "paddw "read_offset"(%%"REG_D"), %%mm1 \n\t"\
- "paddw "read_offset"+8(%%"REG_D"), %%mm5 \n\t"\
+ "paddw "read_offset"(%%"FF_REG_D"), %%mm1 \n\t"\
+ "paddw "read_offset"+8(%%"FF_REG_D"), %%mm5 \n\t"\
"paddw %%mm3, %%mm1 \n\t"\
"paddw %%mm3, %%mm5 \n\t"\
"psraw $4, %%mm1 \n\t"\
"psraw $4, %%mm5 \n\t"\
"packuswb %%mm5, %%mm1 \n\t"\
- "movq %%mm1, "write_offset"(%%"REG_d") \n\t"
+ "movq %%mm1, "write_offset"(%%"FF_REG_d") \n\t"
#define snow_inner_add_yblock_mmx_end(s_step)\
- "add $"s_step", %%"REG_S" \n\t"\
- "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
- "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
- "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
- "add %%"REG_c", (%%"REG_a") \n\t"\
- "add"OPSIZE " $"PTR_SIZE"*1, %1 \n\t"\
- "add %%"REG_c", %0 \n\t"\
+ "add $"s_step", %%"FF_REG_S" \n\t"\
+ "add %%"FF_REG_c", "FF_PTR_SIZE"*3(%%"FF_REG_a"); \n\t"\
+ "add %%"FF_REG_c", "FF_PTR_SIZE"*2(%%"FF_REG_a"); \n\t"\
+ "add %%"FF_REG_c", "FF_PTR_SIZE"*1(%%"FF_REG_a"); \n\t"\
+ "add %%"FF_REG_c", (%%"FF_REG_a") \n\t"\
+ "add"FF_OPSIZE " $"FF_PTR_SIZE"*1, %1 \n\t"\
+ "add %%"FF_REG_c", %0 \n\t"\
"dec %2 \n\t"\
"jnz 1b \n\t"\
:"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
:\
"rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\
- "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");
+ "%"FF_REG_c"","%"FF_REG_S"","%"FF_REG_D"","%"FF_REG_d"");
static void inner_add_yblock_bw_8_obmc_16_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
diff --git a/libavcodec/x86/vc1dsp_mmx.c b/libavcodec/x86/vc1dsp_mmx.c
index da32a3ee34..45c8a68f29 100644
--- a/libavcodec/x86/vc1dsp_mmx.c
+++ b/libavcodec/x86/vc1dsp_mmx.c
@@ -84,7 +84,7 @@ static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
{\
rnd = 8-rnd;\
__asm__ volatile(\
- "mov $8, %%"REG_c" \n\t"\
+ "mov $8, %%"FF_REG_c" \n\t"\
LOAD_ROUNDER_MMX("%5")\
"movq "MANGLE(ff_pw_9)", %%mm6\n\t"\
"1: \n\t"\
@@ -119,13 +119,13 @@ static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
"movq %%mm3, (%1) \n\t"\
"add %6, %0 \n\t"\
"add %4, %1 \n\t"\
- "dec %%"REG_c" \n\t"\
+ "dec %%"FF_REG_c" \n\t"\
"jnz 1b \n\t"\
: "+r"(src), "+r"(dst)\
: "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
"g"(stride-offset)\
NAMED_CONSTRAINTS_ADD(ff_pw_9)\
- : "%"REG_c, "memory"\
+ : "%"FF_REG_c, "memory"\
);\
}