| author | Lynne <dev@lynne.ee> | 2023-02-01 02:26:20 +0100 |
|---|---|---|
| committer | Lynne <dev@lynne.ee> | 2023-02-01 04:23:55 +0100 |
| commit | bbe95f7353a972f28a48be8da883549f02c59e4b (patch) | |
| tree | 08841c9da55e7f076f6046d1dbd70f49d74c0ec0 /libavcodec/x86/h264_intrapred_10bit.asm | |
| parent | fc9a3b584da3cf3fc1f00036be2eaf5dff903ccf (diff) | |
| download | ffmpeg-bbe95f7353a972f28a48be8da883549f02c59e4b.tar.gz | |
x86: replace explicit REP_RETs with RETs
From x86inc:
> On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
> a branch or a branch target. So switch to a 2-byte form of ret in that case.
> We can automatically detect "follows a branch", but not a branch target.
> (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
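To make the quoted advice concrete, here is a minimal NASM-style sketch (illustrative only; the label and register names are not taken from the patch). "rep ret" is an ordinary ret (C3) carrying a redundant REP prefix (F3), giving a 2-byte return that AMD CPUs <= K10 predict correctly when it sits right after a branch:

```asm
; Hypothetical loop epilogue showing the two return forms.
func:
.loop:
    dec     r2d          ; loop counter
    jg      .loop        ; backward branch; the return below directly follows it
    rep ret              ; 2-byte return: F3 C3 -- safe on AMD <= K10
    ; ret                ; 1-byte return: C3 -- slow here on those CPUs
```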
x86inc can automatically determine whether to use REP_RET rather than
RET in most of these cases, so the impact is minimal. Additionally, a few
REP_RETs were used unnecessarily, despite the return being nowhere near a
branch.
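The mechanism behind that auto-detection, roughly: x86inc wraps the branch mnemonics in macros that record the address just past each branch, and the return macro emits a REP prefix only when the return sits exactly at such a recorded address. A simplified sketch paraphrasing x86inc.asm follows; JG and AUTO_RET are stand-ins for the real wrappers (x86inc redefines lowercase jg itself and performs this inside RET/AUTO_REP_RET), and the SSSE3 cpuflag check and function-size bookkeeping are omitted:

```asm
%define last_branch_adr $$            ; sentinel: no branch seen yet

%macro JG 1                           ; simplified stand-in for x86inc's jg wrapper
    jg %1
    %%after_branch equ $              ; bind the address just past the branch...
    %xdefine last_branch_adr %%after_branch ; ...and remember it
%endmacro

%macro AUTO_RET 0
    ; x86inc's trick: times 1 iff $ == last_branch_adr, else times 0,
    ; i.e. a REP prefix is emitted only when the return directly
    ; follows a branch.
    times ((last_branch_adr-$)>>31)+1 rep
    ret
%endmacro
```

Code written against x86inc therefore just ends functions with RET, as this patch does, and the 2-byte form appears automatically wherever a return follows a branch on CPUs that need it.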
The only CPUs affected were AMD K10s, made between 2007 and 2011 (16 and
12 years ago, respectively).
In the future, everyone involved with x86inc should consider dropping
REP_RETs altogether.
Diffstat (limited to 'libavcodec/x86/h264_intrapred_10bit.asm')
-rw-r--r-- | libavcodec/x86/h264_intrapred_10bit.asm | 16 |
1 file changed, 8 insertions, 8 deletions
```diff
diff --git a/libavcodec/x86/h264_intrapred_10bit.asm b/libavcodec/x86/h264_intrapred_10bit.asm
index c4645d434e..2f30807332 100644
--- a/libavcodec/x86/h264_intrapred_10bit.asm
+++ b/libavcodec/x86/h264_intrapred_10bit.asm
@@ -327,7 +327,7 @@ cglobal pred8x8_horizontal_10, 2, 3
     lea       r0, [r0+r1*2]
     dec       r2d
     jg .loop
-    REP_RET
+    RET
 
 ;-----------------------------------------------------------------------------
 ; void ff_predict_8x8_dc_10(pixel *src, ptrdiff_t stride)
@@ -481,7 +481,7 @@ cglobal pred8x8_plane_10, 2, 7, 7
     add       r0, r1
     dec       r2d
     jg .loop
-    REP_RET
+    RET
 
 
 ;-----------------------------------------------------------------------------
@@ -994,7 +994,7 @@ cglobal pred16x16_vertical_10, 2, 3
     lea       r0, [r0+r1*2]
     dec       r2d
     jg .loop
-    REP_RET
+    RET
 
 ;-----------------------------------------------------------------------------
 ; void ff_pred16x16_horizontal_10(pixel *src, ptrdiff_t stride)
@@ -1012,7 +1012,7 @@ cglobal pred16x16_horizontal_10, 2, 3
     lea       r0, [r0+r1*2]
     dec       r2d
     jg .vloop
-    REP_RET
+    RET
 
 ;-----------------------------------------------------------------------------
 ; void ff_pred16x16_dc_10(pixel *src, ptrdiff_t stride)
@@ -1048,7 +1048,7 @@ cglobal pred16x16_dc_10, 2, 6
     lea       r5, [r5+r1*2]
     dec       r3d
     jg .loop
-    REP_RET
+    RET
 
 ;-----------------------------------------------------------------------------
 ; void ff_pred16x16_top_dc_10(pixel *src, ptrdiff_t stride)
@@ -1070,7 +1070,7 @@ cglobal pred16x16_top_dc_10, 2, 3
     lea       r0, [r0+r1*2]
     dec       r2d
     jg .loop
-    REP_RET
+    RET
 
 ;-----------------------------------------------------------------------------
 ; void ff_pred16x16_left_dc_10(pixel *src, ptrdiff_t stride)
@@ -1101,7 +1101,7 @@ cglobal pred16x16_left_dc_10, 2, 6
     lea       r5, [r5+r1*2]
     dec       r3d
     jg .loop
-    REP_RET
+    RET
 
 ;-----------------------------------------------------------------------------
 ; void ff_pred16x16_128_dc_10(pixel *src, ptrdiff_t stride)
@@ -1116,4 +1116,4 @@ cglobal pred16x16_128_dc_10, 2,3
     lea       r0, [r0+r1*2]
     dec       r2d
     jg .loop
-    REP_RET
+    RET
```