diff options
author | Michael Niedermayer <michaelni@gmx.at> | 2001-10-17 03:40:03 +0000 |
---|---|---|
committer | Michael Niedermayer <michaelni@gmx.at> | 2001-10-17 03:40:03 +0000 |
commit | a6e972a217cfea5d0d6bffffaef82dabb3c4ba3a (patch) | |
tree | 297e6a47b47aea3416260555ee72920e90ccb30a | |
parent | d3f41512a8e275befaaec17ff792a9c24cac0ab0 (diff) | |
download | ffmpeg-a6e972a217cfea5d0d6bffffaef82dabb3c4ba3a.tar.gz |
fixed out of regs "bug"
Originally committed as revision 2234 to svn://svn.mplayerhq.hu/mplayer/trunk/postproc
-rw-r--r-- | postproc/swscale.c | 15 | ||||
-rw-r--r-- | postproc/swscale_template.c | 15 |
2 files changed, 18 insertions, 12 deletions
diff --git a/postproc/swscale.c b/postproc/swscale.c
index b3746598d3..b500fb110e 100644
--- a/postproc/swscale.c
+++ b/postproc/swscale.c
@@ -288,8 +288,9 @@ s_xinc&= -2; //clear last bit or uv and y might be shifted relative to each othe
 		"imull %%ecx, %%esi		\n\t" //(src[xx+1] - src[xx])*2*xalpha
 		"shll $8, %%edi			\n\t"
 		"addl %%edi, %%esi		\n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+		"movl %1, %%edi			\n\t"
 		"shrl $1, %%esi			\n\t"
-		"movw %%si, (%1, %%eax, 2)	\n\t"
+		"movw %%si, (%%edi, %%eax, 2)	\n\t"
 		"addb %4, %%cl			\n\t" //2*xalpha += s_xinc&0xFF
 		"adcl %3, %%ebx			\n\t" //xx+= s_xinc>>8 + carry
@@ -299,8 +300,9 @@ s_xinc&= -2; //clear last bit or uv and y might be shifted relative to each othe
 		"imull %%ecx, %%esi		\n\t" //(src[xx+1] - src[xx])*2*xalpha
 		"shll $8, %%edi			\n\t"
 		"addl %%edi, %%esi		\n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+		"movl %1, %%edi			\n\t"
 		"shrl $1, %%esi			\n\t"
-		"movw %%si, 2(%1, %%eax, 2)	\n\t"
+		"movw %%si, 2(%%edi, %%eax, 2)	\n\t"
 		"addb %4, %%cl			\n\t" //2*xalpha += s_xinc&0xFF
 		"adcl %3, %%ebx			\n\t" //xx+= s_xinc>>8 + carry
@@ -310,7 +312,7 @@ s_xinc&= -2; //clear last bit or uv and y might be shifted relative to each othe
 		" jb 1b				\n\t"
-		:: "r" (src), "r" (buf1), "m" (dstw), "m" (s_xinc>>8), "m" (s_xinc&0xFF)
+		:: "r" (src), "m" (buf1), "m" (dstw), "m" (s_xinc>>8), "m" (s_xinc&0xFF)
 		: "%eax", "%ebx", "%ecx", "%edi", "%esi"
 		);
 #else
@@ -416,8 +418,9 @@ s_xinc&= -2; //clear last bit or uv and y might be shifted relative to each othe
 		"xorl %%ebx, %%ebx		\n\t" // xx
 		"xorl %%ecx, %%ecx		\n\t" // 2*xalpha
 		"1:				\n\t"
-		"movzbl (%0, %%ebx), %%edi	\n\t" //src[xx]
-		"movzbl 1(%0, %%ebx), %%esi	\n\t" //src[xx+1]
+		"movl %0, %%esi			\n\t"
+		"movzbl (%%esi, %%ebx), %%edi	\n\t" //src[xx]
+		"movzbl 1(%%esi, %%ebx), %%esi	\n\t" //src[xx+1]
 		"subl %%edi, %%esi		\n\t" //src[xx+1] - src[xx]
 		"imull %%ecx, %%esi		\n\t" //(src[xx+1] - src[xx])*2*xalpha
 		"shll $8, %%edi			\n\t"
@@ -443,7 +446,7 @@ s_xinc&= -2; //clear last bit or uv and y might be shifted relative to each othe
 		" jb 1b				\n\t"
-		:: "r" (src1), "m" (uvbuf1), "m" (dstw), "m" (s_xinc2>>8), "m" (s_xinc2&0xFF),
+		:: "m" (src1), "m" (uvbuf1), "m" (dstw), "m" (s_xinc2>>8), "m" (s_xinc2&0xFF),
 		"r" (src2)
 		: "%eax", "%ebx", "%ecx", "%edi", "%esi"
 		);
diff --git a/postproc/swscale_template.c b/postproc/swscale_template.c
index b3746598d3..b500fb110e 100644
--- a/postproc/swscale_template.c
+++ b/postproc/swscale_template.c
@@ -288,8 +288,9 @@ s_xinc&= -2; //clear last bit or uv and y might be shifted relative to each othe
 		"imull %%ecx, %%esi		\n\t" //(src[xx+1] - src[xx])*2*xalpha
 		"shll $8, %%edi			\n\t"
 		"addl %%edi, %%esi		\n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+		"movl %1, %%edi			\n\t"
 		"shrl $1, %%esi			\n\t"
-		"movw %%si, (%1, %%eax, 2)	\n\t"
+		"movw %%si, (%%edi, %%eax, 2)	\n\t"
 		"addb %4, %%cl			\n\t" //2*xalpha += s_xinc&0xFF
 		"adcl %3, %%ebx			\n\t" //xx+= s_xinc>>8 + carry
@@ -299,8 +300,9 @@ s_xinc&= -2; //clear last bit or uv and y might be shifted relative to each othe
 		"imull %%ecx, %%esi		\n\t" //(src[xx+1] - src[xx])*2*xalpha
 		"shll $8, %%edi			\n\t"
 		"addl %%edi, %%esi		\n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+		"movl %1, %%edi			\n\t"
 		"shrl $1, %%esi			\n\t"
-		"movw %%si, 2(%1, %%eax, 2)	\n\t"
+		"movw %%si, 2(%%edi, %%eax, 2)	\n\t"
 		"addb %4, %%cl			\n\t" //2*xalpha += s_xinc&0xFF
 		"adcl %3, %%ebx			\n\t" //xx+= s_xinc>>8 + carry
@@ -310,7 +312,7 @@ s_xinc&= -2; //clear last bit or uv and y might be shifted relative to each othe
 		" jb 1b				\n\t"
-		:: "r" (src), "r" (buf1), "m" (dstw), "m" (s_xinc>>8), "m" (s_xinc&0xFF)
+		:: "r" (src), "m" (buf1), "m" (dstw), "m" (s_xinc>>8), "m" (s_xinc&0xFF)
 		: "%eax", "%ebx", "%ecx", "%edi", "%esi"
 		);
 #else
@@ -416,8 +418,9 @@ s_xinc&= -2; //clear last bit or uv and y might be shifted relative to each othe
 		"xorl %%ebx, %%ebx		\n\t" // xx
 		"xorl %%ecx, %%ecx		\n\t" // 2*xalpha
 		"1:				\n\t"
-		"movzbl (%0, %%ebx), %%edi	\n\t" //src[xx]
-		"movzbl 1(%0, %%ebx), %%esi	\n\t" //src[xx+1]
+		"movl %0, %%esi			\n\t"
+		"movzbl (%%esi, %%ebx), %%edi	\n\t" //src[xx]
+		"movzbl 1(%%esi, %%ebx), %%esi	\n\t" //src[xx+1]
 		"subl %%edi, %%esi		\n\t" //src[xx+1] - src[xx]
 		"imull %%ecx, %%esi		\n\t" //(src[xx+1] - src[xx])*2*xalpha
 		"shll $8, %%edi			\n\t"
@@ -443,7 +446,7 @@ s_xinc&= -2; //clear last bit or uv and y might be shifted relative to each othe
 		" jb 1b				\n\t"
-		:: "r" (src1), "m" (uvbuf1), "m" (dstw), "m" (s_xinc2>>8), "m" (s_xinc2&0xFF),
+		:: "m" (src1), "m" (uvbuf1), "m" (dstw), "m" (s_xinc2>>8), "m" (s_xinc2&0xFF),
 		"r" (src2)
 		: "%eax", "%ebx", "%ecx", "%edi", "%esi"
 		);