author    | Diego Pettenò <flameeyes@gmail.com> | 2008-10-16 13:34:09 +0000
committer | Diego Pettenò <flameeyes@gmail.com> | 2008-10-16 13:34:09 +0000
commit    | be449fca79a3b0394143f0a77c99784e65868d9f (patch)
tree      | 5c5b2bbfe648467292b30cc501265e556acab101 /libavcodec/i386/motion_est_mmx.c
parent    | a14b362fc650a5e036d413033d9709a526662d89 (diff)
download  | ffmpeg-be449fca79a3b0394143f0a77c99784e65868d9f.tar.gz
Convert asm keyword into __asm__.
Neither the asm() nor the __asm__() keyword is part of the C99
standard. GCC accepts the former in C89 mode, but rejects it in C99
mode unless GNU extensions are re-enabled (with -fasm); the latter
form is accepted in any mode as an extension, without requiring
further command-line options.
The Sun Studio C99 compiler likewise rejects asm() while accepting
__asm__(), albeit with a warning that it is not valid C99 syntax.
Originally committed as revision 15627 to svn://svn.ffmpeg.org/ffmpeg/trunk
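As a quick illustration of the point made in the commit message (this snippet is not part of the commit; the file name asm_keyword.c and the USE_PLAIN_ASM macro are hypothetical, and the example is x86-only), the plain keyword is expected to fail under GCC's strict C99 mode, while the double-underscore form builds in any mode:

```c
/* asm_keyword.c -- hypothetical sketch, x86/x86-64 only.
 *
 *   gcc -std=c99   -c asm_keyword.c                  -> builds (__asm__ form)
 *   gcc -std=c99   -DUSE_PLAIN_ASM -c asm_keyword.c  -> expected to fail: 'asm' is not
 *                                                       recognized without GNU extensions
 *   gcc -std=gnu99 -DUSE_PLAIN_ASM -c asm_keyword.c  -> builds (GNU extensions enabled)
 */
#include <stdint.h>

static inline uint32_t bswap32(uint32_t x)
{
#ifdef USE_PLAIN_ASM
    asm volatile ("bswap %0" : "+r"(x));     /* plain keyword: C89/GNU modes only */
#else
    __asm__ volatile ("bswap %0" : "+r"(x)); /* alternate keyword: accepted in every mode */
#endif
    return x;
}
```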
Diffstat (limited to 'libavcodec/i386/motion_est_mmx.c')
-rw-r--r-- | libavcodec/i386/motion_est_mmx.c | 38
1 file changed, 19 insertions, 19 deletions
diff --git a/libavcodec/i386/motion_est_mmx.c b/libavcodec/i386/motion_est_mmx.c
index 0e111f9f97..16291612a5 100644
--- a/libavcodec/i386/motion_est_mmx.c
+++ b/libavcodec/i386/motion_est_mmx.c
@@ -36,7 +36,7 @@ DECLARE_ASM_CONST(8, uint64_t, bone)= 0x0101010101010101LL;
 static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     x86_reg len= -(stride*h);
-    asm volatile(
+    __asm__ volatile(
         ASMALIGN(4)
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
@@ -71,7 +71,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 
 static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
-    asm volatile(
+    __asm__ volatile(
         ASMALIGN(4)
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
@@ -92,7 +92,7 @@ static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
 {
     int ret;
-    asm volatile(
+    __asm__ volatile(
         "pxor %%xmm6, %%xmm6            \n\t"
         ASMALIGN(4)
         "1:                             \n\t"
@@ -109,7 +109,7 @@ static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
         : "+r" (h), "+r" (blk1), "+r" (blk2)
         : "r" ((x86_reg)stride)
     );
-    asm volatile(
+    __asm__ volatile(
         "movhlps %%xmm6, %%xmm0         \n\t"
         "paddw %%xmm0, %%xmm6           \n\t"
         "movd %%xmm6, %0                \n\t"
@@ -120,7 +120,7 @@ static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
 
 static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
-    asm volatile(
+    __asm__ volatile(
         ASMALIGN(4)
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
@@ -142,7 +142,7 @@ static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h
 
 static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
-    asm volatile(
+    __asm__ volatile(
         "movq (%1), %%mm0               \n\t"
         "add %3, %1                     \n\t"
         ASMALIGN(4)
@@ -167,7 +167,7 @@ static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h
 
 static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
-    asm volatile(
+    __asm__ volatile(
         "movq "MANGLE(bone)", %%mm5     \n\t"
         "movq (%1), %%mm0               \n\t"
         "pavgb 1(%1), %%mm0             \n\t"
@@ -198,7 +198,7 @@ static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
 {
     x86_reg len= -(stride*h);
-    asm volatile(
+    __asm__ volatile(
         ASMALIGN(4)
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
@@ -236,7 +236,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int
 static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     x86_reg len= -(stride*h);
-    asm volatile(
+    __asm__ volatile(
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
         "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
         "movq %%mm0, %%mm1              \n\t"
@@ -289,7 +289,7 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 static inline int sum_mmx(void)
 {
     int ret;
-    asm volatile(
+    __asm__ volatile(
         "movq %%mm6, %%mm0              \n\t"
         "psrlq $32, %%mm6               \n\t"
         "paddw %%mm0, %%mm6             \n\t"
@@ -305,7 +305,7 @@ static inline int sum_mmx(void)
 static inline int sum_mmx2(void)
 {
     int ret;
-    asm volatile(
+    __asm__ volatile(
         "movd %%mm6, %0                 \n\t"
         : "=r" (ret)
     );
@@ -326,7 +326,7 @@ static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
     assert(h==8);\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t":);\
 \
     sad8_1_ ## suf(blk1, blk2, stride, 8);\
@@ -336,7 +336,7 @@ static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h
 static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
     assert(h==8);\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  "movq %0, %%mm5        \n\t"\
                  :: "m"(round_tab[1]) \
@@ -350,7 +350,7 @@ static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, in
 static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
     assert(h==8);\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  "movq %0, %%mm5        \n\t"\
                  :: "m"(round_tab[1]) \
@@ -364,7 +364,7 @@ static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, in
 static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
     assert(h==8);\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  ::);\
 \
@@ -375,7 +375,7 @@ static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, i
 \
 static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t":);\
 \
     sad8_1_ ## suf(blk1 , blk2 , stride, h);\
@@ -385,7 +385,7 @@ static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int
 }\
 static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  "movq %0, %%mm5        \n\t"\
                  :: "m"(round_tab[1]) \
@@ -398,7 +398,7 @@ static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, i
 }\
 static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  "movq %0, %%mm5        \n\t"\
                  :: "m"(round_tab[1]) \
@@ -411,7 +411,7 @@ static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, i
 }\
 static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  ::);\
 \