author     Lynne <dev@lynne.ee>    2023-02-01 02:26:20 +0100
committer  Lynne <dev@lynne.ee>    2023-02-01 04:23:55 +0100
commit     bbe95f7353a972f28a48be8da883549f02c59e4b
tree       08841c9da55e7f076f6046d1dbd70f49d74c0ec0 /libswscale
parent     fc9a3b584da3cf3fc1f00036be2eaf5dff903ccf
x86: replace explicit REP_RETs with RETs
From x86inc:
> On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
> a branch or a branch target. So switch to a 2-byte form of ret in that case.
> We can automatically detect "follows a branch", but not a branch target.
> (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
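For illustration only (a sketch with hypothetical labels, not part of the commit message), the two situations the quote distinguishes look like this in plain NASM syntax; rep ret is the 2-byte form (F3 C3), a plain ret is the 1-byte C3:

    count_down:
        dec     ecx
        jnz     count_down ; the return below immediately follows this branch,
        rep ret            ; so the 2-byte form is used (this case is auto-detectable)

    maybe_skip:
        test    eax, eax
        jz      .out       ; the return below is a branch target,
        dec     ecx
    .out:
        rep ret            ; which cannot be detected automatically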
x86inc can automatically determine whether to use a REP_RET rather than
a plain RET in most of these cases, so the impact is minimal. Additionally,
a few REP_RETs were used unnecessarily, despite the return being nowhere
near a branch.
The only CPUs affected were AMD K10s, made between 2007 and 2011, that is,
16 and 12 years ago respectively.
In the future, everyone involved with x86inc should consider dropping
REP_RETs altogether.
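To make the new pattern concrete, here is a minimal sketch in the same x86inc style as the loops changed below; the function name, its arguments and the include path are assumptions, not code from this commit. Because the INIT is SSE2 (no SSSE3) and the RET directly follows a branch, x86inc's automatic rep-ret handling emits the 2-byte form on its own:

    %include "libavutil/x86/x86util.asm"

    SECTION .text

    INIT_XMM sse2
    ; Hypothetical 16-byte-aligned copy loop: dst and src point past the end of
    ; their buffers and w holds the negated byte count, so the loop runs until
    ; wq crosses zero.
    cglobal copy_pixels, 3, 3, 1, dst, src, w
    .loop:
        mova  m0, [srcq+wq]
        mova  [dstq+wq], m0
        add   wq, mmsize
        jl    .loop
        RET   ; formerly REP_RET; x86inc can emit rep ret here by itself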
Diffstat (limited to 'libswscale')
-rw-r--r--   libswscale/x86/input.asm      | 14 +++++++-------
-rw-r--r--   libswscale/x86/output.asm     | 10 +++++-----
-rw-r--r--   libswscale/x86/scale.asm      |  2 +-
-rw-r--r--   libswscale/x86/scale_avx2.asm |  2 +-
-rw-r--r--   libswscale/x86/yuv2yuvX.asm   |  2 +-
-rw-r--r--   libswscale/x86/yuv_2_rgb.asm  |  2 +-
6 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/libswscale/x86/input.asm b/libswscale/x86/input.asm
index 6de6733faa..a197183f1f 100644
--- a/libswscale/x86/input.asm
+++ b/libswscale/x86/input.asm
@@ -207,7 +207,7 @@ cglobal %2 %+ 24ToY, 6, 6, %1, dst, src, u1, u2, w, table
     mova [dstq+wq], m0
     add wq, mmsize
     jl .loop
-    REP_RET
+    RET
 %endif ; ARCH_X86_64 && %0 == 3
 %endmacro
@@ -313,7 +313,7 @@ cglobal %2 %+ 24ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
     mova [dstVq+wq], m2
     add wq, mmsize
     jl .loop
-    REP_RET
+    RET
 %endif ; ARCH_X86_64 && %0 == 3
 %endmacro
@@ -394,7 +394,7 @@ cglobal %2%3%4%5 %+ ToY, 6, 6, %1, dst, src, u1, u2, w, table
     add wq, 2
     jl .loop2
 .end:
-    REP_RET
+    RET
 %endif ; %0 == 3
 %endmacro
@@ -491,7 +491,7 @@ cglobal %2%3%4%5 %+ ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
     add wq, 2
     jl .loop2
 .end:
-    REP_RET
+    RET
 %endif ; ARCH_X86_64 && %0 == 3
 %endmacro
@@ -543,7 +543,7 @@ RGB32_FUNCS 8, 12
     mova [dstq+wq], m0
     add wq, mmsize
     jl .loop_%1
-    REP_RET
+    RET
 %endmacro

 ; %1 = nr. of XMM registers
@@ -599,7 +599,7 @@ cglobal %2ToY, 5, 5, %1, dst, unused0, unused1, src, w
     movhps [dstVq+wq], m1
     add wq, mmsize / 2
     jl .loop_%1
-    REP_RET
+    RET
 %endmacro

 ; %1 = nr. of XMM registers
@@ -657,7 +657,7 @@ cglobal %2ToUV, 4, 5, %1, dstU, dstV, unused, src, w
 %endif ; nv12/21
     add wq, mmsize
     jl .loop_%1
-    REP_RET
+    RET
 %endmacro

 ; %1 = nr. of XMM registers
diff --git a/libswscale/x86/output.asm b/libswscale/x86/output.asm
index f943a27534..95ec2fa885 100644
--- a/libswscale/x86/output.asm
+++ b/libswscale/x86/output.asm
@@ -297,7 +297,7 @@ cglobal yuv2planeX_%1, %3, 8, %2, filter, fltsize, src, dst, w, dither, offset
     test dstq, 15
     jnz .unaligned
     yuv2planeX_mainloop %1, a
-    REP_RET
+    RET
 .unaligned:
     yuv2planeX_mainloop %1, u
 %endif ; mmsize == 8/16
@@ -307,10 +307,10 @@ cglobal yuv2planeX_%1, %3, 8, %2, filter, fltsize, src, dst, w, dither, offset
     ADD rsp, pad
     RET
 %else ; x86-64
-    REP_RET
+    RET
 %endif ; x86-32/64
 %else ; %1 == 9/10/16
-    REP_RET
+    RET
 %endif ; %1 == 8/9/10/16
 %endmacro
@@ -433,10 +433,10 @@ cglobal yuv2plane1_%1, %3, %3, %2, src, dst, w, dither, offset
     test dstq, 15
     jnz .unaligned
     yuv2plane1_mainloop %1, a
-    REP_RET
+    RET
 .unaligned:
     yuv2plane1_mainloop %1, u
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse2
diff --git a/libswscale/x86/scale.asm b/libswscale/x86/scale.asm
index c62ae3dcc2..2e14c8c023 100644
--- a/libswscale/x86/scale.asm
+++ b/libswscale/x86/scale.asm
@@ -357,7 +357,7 @@ cglobal hscale%1to%2_%4, %5, 10, %6, pos0, dst, w, srcmem, filter, fltpos, fltsi
     add wq, 2
 %endif ; %3 ==/!= X
     jl .loop
-    REP_RET
+    RET
 %endmacro

 ; SCALE_FUNCS source_width, intermediate_nbits, n_xmm
diff --git a/libswscale/x86/scale_avx2.asm b/libswscale/x86/scale_avx2.asm
index 37095e596a..179895666a 100644
--- a/libswscale/x86/scale_avx2.asm
+++ b/libswscale/x86/scale_avx2.asm
@@ -144,7 +144,7 @@ cglobal hscale8to15_%1, 7, 9, 16, pos0, dst, w, srcmem, filter, fltpos, fltsize,
 cmp countq, wq
 jl .tail_loop
 .end:
-REP_RET
+RET
 %endmacro

 %if ARCH_X86_64
diff --git a/libswscale/x86/yuv2yuvX.asm b/libswscale/x86/yuv2yuvX.asm
index d5b03495fd..369c850674 100644
--- a/libswscale/x86/yuv2yuvX.asm
+++ b/libswscale/x86/yuv2yuvX.asm
@@ -121,7 +121,7 @@ cglobal yuv2yuvX, 7, 7, 8, filter, filterSize, src, dest, dstW, dither, offset
     mov filterSizeq, filterq
     cmp offsetq, dstWq
     jb .outerloop
-    REP_RET
+    RET
 %endmacro

 INIT_MMX mmxext
diff --git a/libswscale/x86/yuv_2_rgb.asm b/libswscale/x86/yuv_2_rgb.asm
index c5fa3ee690..e3470fd9ad 100644
--- a/libswscale/x86/yuv_2_rgb.asm
+++ b/libswscale/x86/yuv_2_rgb.asm
@@ -354,7 +354,7 @@
 add imageq, 8 * depth * time_num
 add indexq, 4 * time_num
 js .loop0
-REP_RET
+RET
 %endmacro