author     Reimar Döffinger <Reimar.Doeffinger@gmx.de>   2010-09-05 10:10:16 +0000
committer  Reimar Döffinger <Reimar.Doeffinger@gmx.de>   2010-09-05 10:10:16 +0000
commit     b1c32fb5e5e9f6d18077b4f4d6fb7e6fd946630c
tree       78a47c210935a8441f1a9a96f669b1cbe6591d7e
parent     87db37356c3cf63372447d5b8e0fb95e4ed66c15
download   ffmpeg-b1c32fb5e5e9f6d18077b4f4d6fb7e6fd946630c.tar.gz
Use "d" suffix for general-purpose registers used with movd.
This increases compatibility with nasm and is also more consistent,
e.g. with h264_intrapred.asm and h264_chromamc.asm that already
do it that way.
Originally committed as revision 25042 to svn://svn.ffmpeg.org/ffmpeg/trunk
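For context (an illustration, not part of the commit): in the x86inc.asm-style register names these files use, a bare rN refers to the full-width general-purpose register (64-bit on x86_64), while rNd names its 32-bit part. movd only ever transfers 32 bits, so the "d" suffix makes the operand width explicit; the code previously assembled with yasm, and the commit message indicates nasm is stricter about the full-width spelling. A rough sketch of the two forms:

    movd   m3, r3     ; operand is the full-width GPR; the 32-bit width is implicit
    movd   m3, r3d    ; 32-bit view of the same register, matching movd's 32-bit transfer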
 libavcodec/x86/h264_weight.asm | 22
 libavcodec/x86/vc1dsp_yasm.asm |  6
 libavcodec/x86/vp3dsp.asm      |  6
 libavcodec/x86/vp8dsp.asm      | 26
 4 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/libavcodec/x86/h264_weight.asm b/libavcodec/x86/h264_weight.asm
index 6e89ab3bc7..53aa210473 100644
--- a/libavcodec/x86/h264_weight.asm
+++ b/libavcodec/x86/h264_weight.asm
@@ -40,9 +40,9 @@ SECTION .text
 %macro WEIGHT_SETUP 0
     add        r4, r4
     inc        r4
-    movd       m3, r3
-    movd       m5, r4
-    movd       m6, r2
+    movd       m3, r3d
+    movd       m5, r4d
+    movd       m6, r2d
     pslld      m5, m6
     psrld      m5, 1
 %if mmsize == 16
@@ -156,10 +156,10 @@ WEIGHT_FUNC_HALF_MM 8, 4, 16, 8, sse2
     add        r6, 1
     or         r6, 1
     add        r3, 1
-    movd       m3, r4
-    movd       m4, r5
-    movd       m5, r6
-    movd       m6, r3
+    movd       m3, r4d
+    movd       m4, r5d
+    movd       m5, r6d
+    movd       m6, r3d
     pslld      m5, m6
     psrld      m5, 1
 %if mmsize == 16
@@ -291,10 +291,10 @@ BIWEIGHT_FUNC_HALF_MM 8, 4, 16, 8, sse2
     add        r6, 1
     or         r6, 1
     add        r3, 1
-    movd       m4, r4
-    movd       m0, r5
-    movd       m5, r6
-    movd       m6, r3
+    movd       m4, r4d
+    movd       m0, r5d
+    movd       m5, r6d
+    movd       m6, r3d
     pslld      m5, m6
     psrld      m5, 1
     punpcklbw  m4, m0
diff --git a/libavcodec/x86/vc1dsp_yasm.asm b/libavcodec/x86/vc1dsp_yasm.asm
index 660ff11693..3ea9d8db47 100644
--- a/libavcodec/x86/vc1dsp_yasm.asm
+++ b/libavcodec/x86/vc1dsp_yasm.asm
@@ -36,7 +36,7 @@ section .text
 %endmacro
 
 %macro STORE_4_WORDS_MMX 6
-    movd   %6, %5
+    movd   %6d, %5
 %if mmsize==16
     psrldq %5, 4
 %else
@@ -45,7 +45,7 @@ section .text
     mov    %1, %6w
     shr    %6, 16
     mov    %2, %6w
-    movd   %6, %5
+    movd   %6d, %5
     mov    %3, %6w
     shr    %6, 16
     mov    %4, %6w
@@ -88,7 +88,7 @@ section .text
     pxor       m7, m3              ; d_sign ^= a0_sign
 
     pxor       m5, m5
-    movd       m3, r2
+    movd       m3, r2d
%if %1 > 4
     punpcklbw  m3, m3
 %endif
diff --git a/libavcodec/x86/vp3dsp.asm b/libavcodec/x86/vp3dsp.asm
index 95ea561ed0..f2b0af3266 100644
--- a/libavcodec/x86/vp3dsp.asm
+++ b/libavcodec/x86/vp3dsp.asm
@@ -93,12 +93,12 @@ SECTION .text
 %endmacro
 
 %macro STORE_4_WORDS 1
-    movd    r2, %1
+    movd    r2d, %1
     mov     [r0     -1], r2w
     psrlq   %1, 32
     shr     r2, 16
     mov     [r0+r1  -1], r2w
-    movd    r2, %1
+    movd    r2d, %1
     mov     [r0+r1*2-1], r2w
     shr     r2, 16
     mov     [r0+r3  -1], r2w
@@ -606,7 +606,7 @@ cglobal vp3_idct_dc_add_mmx2, 3, 4
     movsx     r2, word [r2]
     add       r2, 15
     sar       r2, 5
-    movd      m0, r2
+    movd      m0, r2d
     pshufw    m0, m0, 0x0
     pxor      m1, m1
     psubw     m1, m0
diff --git a/libavcodec/x86/vp8dsp.asm b/libavcodec/x86/vp8dsp.asm
index 8cdbb3c7ab..bc5ccc8e3a 100644
--- a/libavcodec/x86/vp8dsp.asm
+++ b/libavcodec/x86/vp8dsp.asm
@@ -1342,7 +1342,7 @@ VP8_DC_WHT sse
     psrldq  m%2, 4
 %if %10 == 8
     movd    [%5+%8*2], m%1
-    movd    %5, m%3
+    movd    %5d, m%3
 %endif
     psrldq  m%3, 4
     psrldq  m%4, 4
@@ -1379,26 +1379,26 @@ VP8_DC_WHT sse
 ; 4 is a pointer to the destination's 4th line
 ; 5/6 is -stride and +stride
 %macro WRITE_2x4W 6
-    movd    %3, %1
+    movd    %3d, %1
     punpckhdq %1, %1
     mov     [%4+%5*4], %3w
     shr     %3, 16
     add     %4, %6
     mov     [%4+%5*4], %3w
 
-    movd    %3, %1
+    movd    %3d, %1
     add     %4, %5
     mov     [%4+%5*2], %3w
     shr     %3, 16
     mov     [%4+%5  ], %3w
 
-    movd    %3, %2
+    movd    %3d, %2
     punpckhdq %2, %2
     mov     [%4     ], %3w
     shr     %3, 16
     mov     [%4+%6  ], %3w
 
-    movd    %3, %2
+    movd    %3d, %2
     add     %4, %6
     mov     [%4+%6  ], %3w
     shr     %3, 16
@@ -1407,27 +1407,27 @@ VP8_DC_WHT sse
 %endmacro
 
 %macro WRITE_8W_SSE2 5
-    movd    %2, %1
+    movd    %2d, %1
     psrldq  %1, 4
     mov     [%3+%4*4], %2w
     shr     %2, 16
     add     %3, %5
     mov     [%3+%4*4], %2w
 
-    movd    %2, %1
+    movd    %2d, %1
     psrldq  %1, 4
     add     %3, %4
     mov     [%3+%4*2], %2w
     shr     %2, 16
     mov     [%3+%4  ], %2w
 
-    movd    %2, %1
+    movd    %2d, %1
     psrldq  %1, 4
     mov     [%3     ], %2w
     shr     %2, 16
     mov     [%3+%5  ], %2w
 
-    movd    %2, %1
+    movd    %2d, %1
     add     %3, %5
     mov     [%3+%5  ], %2w
     shr     %2, 16
@@ -1446,27 +1446,27 @@ VP8_DC_WHT sse
 %endmacro
 
 %macro SPLATB_REG_MMX 2-3
-    movd    %1, %2
+    movd    %1, %2d
     punpcklbw %1, %1
     punpcklwd %1, %1
     punpckldq %1, %1
 %endmacro
 
 %macro SPLATB_REG_MMXEXT 2-3
-    movd    %1, %2
+    movd    %1, %2d
     punpcklbw %1, %1
     pshufw  %1, %1, 0x0
 %endmacro
 
 %macro SPLATB_REG_SSE2 2-3
-    movd    %1, %2
+    movd    %1, %2d
     punpcklbw %1, %1
     pshuflw %1, %1, 0x0
     punpcklqdq %1, %1
 %endmacro
 
 %macro SPLATB_REG_SSSE3 3
-    movd    %1, %2
+    movd    %1, %2d
     pshufb  %1, %3
 %endmacro