commit     bd1bcb07e0f29c135103a402d71b343a09ad1690
author     James Almer <jamrial@gmail.com>  2024-07-10 13:00:20 -0300
committer  James Almer <jamrial@gmail.com>  2024-07-10 13:25:44 -0300
tree       f675104705fb8c16b1f6c2dc8833a64d32bf035c /configure
parent     4a04cca69af807ccf831da977a94350611967c4c
x86/intreadwrite: use intrinsics instead of inline asm for AV_COPY128
This has the benefit of removing any SSE-to-AVX transition penalty that may occur when
the compiler emits VEX-encoded instructions around the legacy-encoded SSE instructions
that were hard-coded in the inline asm.
Signed-off-by: James Almer <jamrial@gmail.com>
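
The diffstat below is limited to configure, so the header change itself is not shown on this page. For context, here is a minimal sketch of what an intrinsics-based AV_COPY128 can look like; it assumes 16-byte-aligned pointers, as the previous movaps-based inline asm implied, and is illustrative rather than the exact hunk from libavutil/x86/intreadwrite.h:

    #include <immintrin.h>

    /* Copy 16 bytes via SSE intrinsics. The compiler chooses the encoding
     * (legacy movaps or VEX vmovaps) to match the surrounding code, so it
     * cannot introduce an SSE/AVX transition the way hard-coded asm can. */
    #define AV_COPY128(d, s)                                  \
        do {                                                  \
            __m128 tmp_ = _mm_load_ps((const float *)(s));    \
            _mm_store_ps((float *)(d), tmp_);                 \
        } while (0)

As a side note, intrinsics also keep the copy visible to the optimizer, whereas inline asm is opaque to it.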
Diffstat (limited to 'configure')
-rwxr-xr-x  configure | 5
1 file changed, 4 insertions(+), 1 deletion(-)
@@ -2314,6 +2314,7 @@ HEADERS_LIST="
 
 INTRINSICS_LIST="
     intrinsics_neon
+    intrinsics_sse
     intrinsics_sse2
 "
 
@@ -2744,7 +2745,8 @@ armv6t2_deps="arm"
 armv8_deps="aarch64"
 neon_deps_any="aarch64 arm"
 intrinsics_neon_deps="neon"
-intrinsics_sse2_deps="sse2"
+intrinsics_sse_deps="sse"
+intrinsics_sse2_deps="sse2 intrinsics_sse"
 vfp_deps="arm"
 vfpv3_deps="vfp"
 setend_deps="arm"
@@ -6446,6 +6448,7 @@ elif enabled loongarch; then
 fi
 
 check_cc intrinsics_neon arm_neon.h "int16x8_t test = vdupq_n_s16(0)"
+check_cc intrinsics_sse immintrin.h "__m128 test = _mm_setzero_ps()"
 check_cc intrinsics_sse2 emmintrin.h "__m128i test = _mm_setzero_si128()"
 
 check_ldflags -Wl,--as-needed
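
For reference, check_cc wraps the quoted snippet into a tiny test program together with the listed header and enables the named symbol only if it compiles; since intrinsics_sse sits in INTRINSICS_LIST, a successful probe ends up as a HAVE_INTRINSICS_SSE define that header code can test. Roughly, the new check compiles something like the following (a sketch of the generated test, not the literal file configure writes):

    #include <immintrin.h>

    int main(void)
    {
        /* The probe only has to compile; it is never executed. */
        __m128 test = _mm_setzero_ps();
        (void)test;  /* silence unused-variable warnings in this sketch */
        return 0;
    }

The new intrinsics_sse2_deps="sse2 intrinsics_sse" line additionally ties the SSE2 intrinsics flag to this check, so SSE2 intrinsics are only enabled when the plain SSE probe also succeeds.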