diff options
author | Reimar Döffinger <Reimar.Doeffinger@gmx.de> | 2010-09-05 10:10:16 +0000 |
---|---|---|
committer | Reimar Döffinger <Reimar.Doeffinger@gmx.de> | 2010-09-05 10:10:16 +0000 |
commit | b1c32fb5e5e9f6d18077b4f4d6fb7e6fd946630c (patch) | |
tree | 78a47c210935a8441f1a9a96f669b1cbe6591d7e /libavcodec/x86/vp8dsp.asm | |
parent | 87db37356c3cf63372447d5b8e0fb95e4ed66c15 (diff) | |
download | ffmpeg-b1c32fb5e5e9f6d18077b4f4d6fb7e6fd946630c.tar.gz |
Use "d" suffix for general-purpose registers used with movd.
This increases compatibility with nasm and is also more consistent,
e.g. with h264_intrapred.asm and h264_chromamc.asm that already
do it that way.
Originally committed as revision 25042 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/x86/vp8dsp.asm')
-rw-r--r-- | libavcodec/x86/vp8dsp.asm | 26 |
1 file changed, 13 insertions, 13 deletions
diff --git a/libavcodec/x86/vp8dsp.asm b/libavcodec/x86/vp8dsp.asm index 8cdbb3c7ab..bc5ccc8e3a 100644 --- a/libavcodec/x86/vp8dsp.asm +++ b/libavcodec/x86/vp8dsp.asm @@ -1342,7 +1342,7 @@ VP8_DC_WHT sse psrldq m%2, 4 %if %10 == 8 movd [%5+%8*2], m%1 - movd %5, m%3 + movd %5d, m%3 %endif psrldq m%3, 4 psrldq m%4, 4 @@ -1379,26 +1379,26 @@ VP8_DC_WHT sse ; 4 is a pointer to the destination's 4th line ; 5/6 is -stride and +stride %macro WRITE_2x4W 6 - movd %3, %1 + movd %3d, %1 punpckhdq %1, %1 mov [%4+%5*4], %3w shr %3, 16 add %4, %6 mov [%4+%5*4], %3w - movd %3, %1 + movd %3d, %1 add %4, %5 mov [%4+%5*2], %3w shr %3, 16 mov [%4+%5 ], %3w - movd %3, %2 + movd %3d, %2 punpckhdq %2, %2 mov [%4 ], %3w shr %3, 16 mov [%4+%6 ], %3w - movd %3, %2 + movd %3d, %2 add %4, %6 mov [%4+%6 ], %3w shr %3, 16 @@ -1407,27 +1407,27 @@ VP8_DC_WHT sse %endmacro %macro WRITE_8W_SSE2 5 - movd %2, %1 + movd %2d, %1 psrldq %1, 4 mov [%3+%4*4], %2w shr %2, 16 add %3, %5 mov [%3+%4*4], %2w - movd %2, %1 + movd %2d, %1 psrldq %1, 4 add %3, %4 mov [%3+%4*2], %2w shr %2, 16 mov [%3+%4 ], %2w - movd %2, %1 + movd %2d, %1 psrldq %1, 4 mov [%3 ], %2w shr %2, 16 mov [%3+%5 ], %2w - movd %2, %1 + movd %2d, %1 add %3, %5 mov [%3+%5 ], %2w shr %2, 16 @@ -1446,27 +1446,27 @@ VP8_DC_WHT sse %endmacro %macro SPLATB_REG_MMX 2-3 - movd %1, %2 + movd %1, %2d punpcklbw %1, %1 punpcklwd %1, %1 punpckldq %1, %1 %endmacro %macro SPLATB_REG_MMXEXT 2-3 - movd %1, %2 + movd %1, %2d punpcklbw %1, %1 pshufw %1, %1, 0x0 %endmacro %macro SPLATB_REG_SSE2 2-3 - movd %1, %2 + movd %1, %2d punpcklbw %1, %1 pshuflw %1, %1, 0x0 punpcklqdq %1, %1 %endmacro %macro SPLATB_REG_SSSE3 3 - movd %1, %2 + movd %1, %2d pshufb %1, %3 %endmacro |