aboutsummaryrefslogtreecommitdiffstats
path: root/libavfilter/x86
diff options
context:
space:
mode:
authorLynne <dev@lynne.ee>2023-02-01 02:26:20 +0100
committerLynne <dev@lynne.ee>2023-02-01 04:23:55 +0100
commitbbe95f7353a972f28a48be8da883549f02c59e4b (patch)
tree08841c9da55e7f076f6046d1dbd70f49d74c0ec0 /libavfilter/x86
parentfc9a3b584da3cf3fc1f00036be2eaf5dff903ccf (diff)
downloadffmpeg-bbe95f7353a972f28a48be8da883549f02c59e4b.tar.gz
x86: replace explicit REP_RETs with RETs
From x86inc: > On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either > a branch or a branch target. So switch to a 2-byte form of ret in that case. > We can automatically detect "follows a branch", but not a branch target. > (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.) x86inc can automatically determine whether to use REP_RET rather than RET in most of these cases, so the impact is minimal. Additionally, a few REP_RETs were used unnecessarily, despite the return being nowhere near a branch. The only CPUs affected were AMD K10s, made between 2007 and 2011 — 16 and 12 years ago, respectively. In the future, everyone involved with x86inc should consider dropping REP_RETs altogether.
Diffstat (limited to 'libavfilter/x86')
-rw-r--r--libavfilter/x86/af_volume.asm6
-rw-r--r--libavfilter/x86/avf_showcqt.asm4
-rw-r--r--libavfilter/x86/scene_sad.asm2
-rw-r--r--libavfilter/x86/vf_blend.asm2
-rw-r--r--libavfilter/x86/vf_framerate.asm2
-rw-r--r--libavfilter/x86/vf_gradfun.asm6
-rw-r--r--libavfilter/x86/vf_hqdn3d.asm2
-rw-r--r--libavfilter/x86/vf_interlace.asm6
-rw-r--r--libavfilter/x86/vf_maskedmerge.asm2
-rw-r--r--libavfilter/x86/vf_stereo3d.asm2
-rw-r--r--libavfilter/x86/vf_w3fdif.asm10
11 files changed, 22 insertions, 22 deletions
diff --git a/libavfilter/x86/af_volume.asm b/libavfilter/x86/af_volume.asm
index 723ab1f8fb..35a00784a2 100644
--- a/libavfilter/x86/af_volume.asm
+++ b/libavfilter/x86/af_volume.asm
@@ -56,7 +56,7 @@ cglobal scale_samples_s16, 4,4,4, dst, src, len, volume
mova [dstq+lenq], m3
sub lenq, mmsize
jge .loop
- REP_RET
+ RET
;------------------------------------------------------------------------------
; void ff_scale_samples_s32(uint8_t *dst, const uint8_t *src, int len,
@@ -93,7 +93,7 @@ cglobal scale_samples_s32, 4,4,4, dst, src, len, volume
%endif
sub lenq, mmsize
jge .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse2
@@ -137,4 +137,4 @@ cglobal scale_samples_s32, 4,4,8, dst, src, len, volume
mova [dstq+lenq], m0
sub lenq, mmsize
jge .loop
- REP_RET
+ RET
diff --git a/libavfilter/x86/avf_showcqt.asm b/libavfilter/x86/avf_showcqt.asm
index 63e58408cd..16af0de9b0 100644
--- a/libavfilter/x86/avf_showcqt.asm
+++ b/libavfilter/x86/avf_showcqt.asm
@@ -127,7 +127,7 @@ cglobal showcqt_cqt_calc, 5, 10, 12, dst, src, coeffs, len, fft_len, x, coeffs_v
lea dstq, [dstq + 16]
lea coeffsq, [coeffsq + 2*Coeffs.sizeof]
jnz .loop_k
- REP_RET
+ RET
align 16
.check_loop_a:
cmp xd, [coeffsq + Coeffs.len]
@@ -170,7 +170,7 @@ cglobal showcqt_cqt_calc, 4, 7, 8, dst, src, coeffs, len, x, coeffs_val, i
lea dstq, [dstq + 8]
lea coeffsq, [coeffsq + Coeffs.sizeof]
jnz .loop_k
- REP_RET
+ RET
%endif ; ARCH_X86_64
%endmacro ; DECLARE_CQT_CALC
diff --git a/libavfilter/x86/scene_sad.asm b/libavfilter/x86/scene_sad.asm
index d38d71ccca..bf7236b3a3 100644
--- a/libavfilter/x86/scene_sad.asm
+++ b/libavfilter/x86/scene_sad.asm
@@ -53,7 +53,7 @@ cglobal scene_sad, 6, 7, 2, src1, stride1, src2, stride2, width, end, x
mov r0q, r6mp
movu [r0q], m1 ; sum
-REP_RET
+RET
%endmacro
diff --git a/libavfilter/x86/vf_blend.asm b/libavfilter/x86/vf_blend.asm
index 277b100e4d..362020ec95 100644
--- a/libavfilter/x86/vf_blend.asm
+++ b/libavfilter/x86/vf_blend.asm
@@ -63,7 +63,7 @@ cglobal blend_%1, 5, 7, %2, top, top_linesize, bottom, bottom_linesize, dst, end
add dstq, dst_linesizeq
sub endd, 1
jg .nextrow
-REP_RET
+RET
%endmacro
%macro BLEND_SIMPLE 2-3 0
diff --git a/libavfilter/x86/vf_framerate.asm b/libavfilter/x86/vf_framerate.asm
index 7a30c870bd..b5505b4ff8 100644
--- a/libavfilter/x86/vf_framerate.asm
+++ b/libavfilter/x86/vf_framerate.asm
@@ -84,7 +84,7 @@ cglobal blend_frames%1, 5, 7, 5, src1, src1_linesize, src2, src2_linesize, dst,
add dstq, dst_linesizeq
sub endd, 1
jg .nextrow
-REP_RET
+RET
%endmacro
diff --git a/libavfilter/x86/vf_gradfun.asm b/libavfilter/x86/vf_gradfun.asm
index 3581f89fe8..d106d52100 100644
--- a/libavfilter/x86/vf_gradfun.asm
+++ b/libavfilter/x86/vf_gradfun.asm
@@ -64,7 +64,7 @@ cglobal gradfun_filter_line, 6, 6
add r0, 4
jl .loop
.end:
- REP_RET
+ RET
INIT_XMM ssse3
cglobal gradfun_filter_line, 6, 6, 8
@@ -78,7 +78,7 @@ cglobal gradfun_filter_line, 6, 6, 8
FILTER_LINE m4
add r0, 8
jl .loop
- REP_RET
+ RET
%macro BLUR_LINE 1
cglobal gradfun_blur_line_%1, 6, 6, 8
@@ -102,7 +102,7 @@ cglobal gradfun_blur_line_%1, 6, 6, 8
mova [r3+r0], m0
add r0, 16
jl .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse2
diff --git a/libavfilter/x86/vf_hqdn3d.asm b/libavfilter/x86/vf_hqdn3d.asm
index e3b1bdca53..2c0ca45571 100644
--- a/libavfilter/x86/vf_hqdn3d.asm
+++ b/libavfilter/x86/vf_hqdn3d.asm
@@ -97,7 +97,7 @@ ALIGN 16
inc xq
jl .loop
je .loop2
- REP_RET
+ RET
%endmacro ; HQDN3D_ROW
HQDN3D_ROW 8
diff --git a/libavfilter/x86/vf_interlace.asm b/libavfilter/x86/vf_interlace.asm
index f4a405c754..c28f9fbe3e 100644
--- a/libavfilter/x86/vf_interlace.asm
+++ b/libavfilter/x86/vf_interlace.asm
@@ -73,7 +73,7 @@ SECTION .text
jl .loop
.end:
- REP_RET
+ RET
%endmacro
%macro LOWPASS_LINE 0
@@ -146,7 +146,7 @@ cglobal lowpass_line_complex, 5, 5, 8, dst, h, src, mref, pref
add srcq, mmsize
sub hd, mmsize
jg .loop
-REP_RET
+RET
cglobal lowpass_line_complex_12, 5, 5, 8, 16, dst, h, src, mref, pref, clip_max
movd m7, DWORD clip_maxm
@@ -208,7 +208,7 @@ cglobal lowpass_line_complex_12, 5, 5, 8, 16, dst, h, src, mref, pref, clip_max
add srcq, 2*mmsize
sub hd, mmsize
jg .loop
-REP_RET
+RET
%endmacro
INIT_XMM sse2
diff --git a/libavfilter/x86/vf_maskedmerge.asm b/libavfilter/x86/vf_maskedmerge.asm
index 1028299087..d9bd4688fd 100644
--- a/libavfilter/x86/vf_maskedmerge.asm
+++ b/libavfilter/x86/vf_maskedmerge.asm
@@ -81,4 +81,4 @@ cglobal maskedmerge8, 5, 7, 8, bsrc, osrc, msrc, dst, blinesize, w, x
add dstq, dlinesizeq
sub hd, 1
jg .nextrow
-REP_RET
+RET
diff --git a/libavfilter/x86/vf_stereo3d.asm b/libavfilter/x86/vf_stereo3d.asm
index a057e495f1..b6a293b18e 100644
--- a/libavfilter/x86/vf_stereo3d.asm
+++ b/libavfilter/x86/vf_stereo3d.asm
@@ -213,4 +213,4 @@ cglobal anaglyph, 3, 6, 8, 2*9*mmsize, dst, lsrc, rsrc, dst_linesize, o, cnt
add rsrcq, r_linesizeq
sub heightd, 1
jg .nextrow
-REP_RET
+RET
diff --git a/libavfilter/x86/vf_w3fdif.asm b/libavfilter/x86/vf_w3fdif.asm
index 52628c38d7..3010469f97 100644
--- a/libavfilter/x86/vf_w3fdif.asm
+++ b/libavfilter/x86/vf_w3fdif.asm
@@ -38,7 +38,7 @@ cglobal w3fdif_scale, 3, 3, 2, 0, out_pixel, work_pixel, linesize
add work_pixelq, mmsize*2
sub linesized, mmsize/2
jg .loop
-REP_RET
+RET
cglobal w3fdif_simple_low, 4, 5, 6, 0, work_line, in_lines_cur0, coef, linesize, offset
movd m1, [coefq]
@@ -63,7 +63,7 @@ cglobal w3fdif_simple_low, 4, 5, 6, 0, work_line, in_lines_cur0, coef, linesize,
add offsetq, mmsize/2
sub linesized, mmsize/2
jg .loop
-REP_RET
+RET
cglobal w3fdif_complex_low, 4, 7, 8, 0, work_line, in_lines_cur0, coef, linesize
movq m0, [coefq]
@@ -99,7 +99,7 @@ cglobal w3fdif_complex_low, 4, 7, 8, 0, work_line, in_lines_cur0, coef, linesize
add offsetq, mmsize/2
sub linesized, mmsize/2
jg .loop
-REP_RET
+RET
%if ARCH_X86_64
cglobal w3fdif_simple_high, 5, 9, 8, 0, work_line, in_lines_cur0, in_lines_adj0, coef, linesize
@@ -179,7 +179,7 @@ cglobal w3fdif_simple_high, 4, 7, 8, 0, work_line, in_lines_cur0, in_lines_adj0,
add offsetq, mmsize/2
sub linesized, mmsize/2
jg .loop
-REP_RET
+RET
%if ARCH_X86_64
@@ -254,6 +254,6 @@ cglobal w3fdif_complex_high, 5, 13, 10, 0, work_line, in_lines_cur0, in_lines_ad
add offsetq, mmsize/2
sub linesized, mmsize/2
jg .loop
-REP_RET
+RET
%endif