author     Henrik Gramner <henrik@gramner.com>    2024-03-16 16:39:37 +0100
committer  Henrik Gramner <henrik@gramner.com>    2024-03-24 14:53:57 +0100
commit     afa471d0efed1df5dca6eeeb2fcdd211ae4cad4e (patch)
tree       db94cecfd1fe1cb5951773461f0d7d93526f0d10
parent     782c4df28dc91a2b5160fe7a35ad18541e8c5029 (diff)
download   ffmpeg-afa471d0efed1df5dca6eeeb2fcdd211ae4cad4e.tar.gz
x86: Update x86inc.asm
Make things up to date with upstream.
https://code.videolan.org/videolan/x86inc.asm
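
The bulk of the churn in the libavcodec files below comes from a semantic change in how x86inc.asm signals position-independent code: PIC used to be either defined or undefined (tested with %ifdef), but it is now always defined as a numeric 0 or 1 (always 1 on x86-64, 0 on 32-bit Windows), so an %ifdef test would now always be true and every consumer has to switch to %if. A minimal sketch of the resulting idiom, using a hypothetical data label:

    %if PIC                              ; PIC is now always defined, as 0 or 1
        lea    r5, [my_const_table]      ; rip-relative load of the table address
        movaps m0, [r5 + r4]
    %else
        movaps m0, [my_const_table + r4] ; absolute addressing (non-PIC x86-32 only)
    %endif

This is the same pattern the SET_PIC_BASE comments in celt_pvq_search.asm describe; only the condition syntax changes.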
-rwxr-xr-x  configure                           |   4
-rw-r--r--  libavcodec/x86/celt_pvq_search.asm  |   4
-rw-r--r--  libavcodec/x86/h264_chromamc.asm    |  10
-rw-r--r--  libavcodec/x86/h264_idct.asm        |   6
-rw-r--r--  libavcodec/x86/h264_intrapred.asm   |  15
-rw-r--r--  libavcodec/x86/hevc_mc.asm          |  16
-rw-r--r--  libavcodec/x86/rv40dsp.asm          |  12
-rw-r--r--  libavcodec/x86/sbrdsp.asm           |   6
-rw-r--r--  libavcodec/x86/vp8dsp.asm           |  30
-rw-r--r--  libavcodec/x86/vp9itxfm.asm         |   2
-rw-r--r--  libavcodec/x86/vp9itxfm_16bpp.asm   |  12
-rw-r--r--  libavutil/x86/x86inc.asm            | 672
12 files changed, 519 insertions, 270 deletions
diff --git a/configure b/configure
--- a/configure
+++ b/configure
@@ -2222,7 +2222,6 @@ ARCH_EXT_LIST_RISCV="
 
 ARCH_EXT_LIST_X86="
     $ARCH_EXT_LIST_X86_SIMD
-    cpunop
     i686
 "
 
@@ -2771,7 +2770,6 @@ mipsdsp_deps="mips"
 mipsdspr2_deps="mips"
 msa_deps="mipsfpu"
 
-cpunop_deps="i686"
 x86_64_select="i686"
 x86_64_suggest="fast_cmov"
 
@@ -6401,7 +6399,6 @@ EOF
     done
     disabled x86asm && die "nasm/yasm not found or too old. Use --disable-x86asm for a crippled build."
     X86ASMFLAGS="-f $objformat"
-    enabled pic && append X86ASMFLAGS "-DPIC"
     test -n "$extern_prefix" && append X86ASMFLAGS "-DPREFIX"
     case "$objformat" in
         elf*) enabled debug && append X86ASMFLAGS $x86asm_debug ;;
@@ -6412,7 +6409,6 @@ EOF
     enabled avx2 && check_x86asm avx2_external "vextracti128 xmm0, ymm0, 0"
     enabled xop  && check_x86asm xop_external  "vpmacsdd xmm0, xmm1, xmm2, xmm3"
     enabled fma4 && check_x86asm fma4_external "vfmaddps ymm0, ymm1, ymm2, ymm3"
-    check_x86asm cpunop "CPU amdnop"
 fi
 
 case "$cpu" in
diff --git a/libavcodec/x86/celt_pvq_search.asm b/libavcodec/x86/celt_pvq_search.asm
index 5c1e6d6174..e9bff02650 100644
--- a/libavcodec/x86/celt_pvq_search.asm
+++ b/libavcodec/x86/celt_pvq_search.asm
@@ -74,7 +74,7 @@ SECTION .text
 ; "movaps m0, [r5 + r4]"            if PIC is enabled
 ; "movaps m0, [constant_name + r4]" if texrel are used
 %macro SET_PIC_BASE 3; reg, const_label
-%ifdef PIC
+%if PIC
     %{1} %2, [%3]    ; lea r5, [rip+const]
     %define pic_base_%3 %2
 %else
@@ -195,7 +195,7 @@ align 16
 ;   PIC relative addressing. Use this
 ;   to count it in cglobal
 ;
-%ifdef PIC
+%if PIC
 %define num_pic_regs 1
 %else
 %define num_pic_regs 0
diff --git a/libavcodec/x86/h264_chromamc.asm b/libavcodec/x86/h264_chromamc.asm
index e70bc492b2..ec6288d48e 100644
--- a/libavcodec/x86/h264_chromamc.asm
+++ b/libavcodec/x86/h264_chromamc.asm
@@ -91,7 +91,7 @@ SECTION .text
 
 %macro chroma_mc8_mmx_func 2-3
 %ifidn %2, rv40
-%ifdef PIC
+%if PIC
 %define rnd_1d_rv40 r8
 %define rnd_2d_rv40 r8
 %define extra_regs 2
@@ -147,7 +147,7 @@ cglobal %1_%2_chroma_mc8%3, 6, 7 + extra_regs, 0
 
     or   r4d, r5d           ; x + y
 %ifidn %2, rv40
-%ifdef PIC
+%if PIC
     lea  r8, [rnd_rv40_1d_tbl]
 %endif
 %if ARCH_X86_64 == 0
@@ -198,7 +198,7 @@ cglobal %1_%2_chroma_mc8%3, 6, 7 + extra_regs, 0
     movd m4, r4d            ; x
     movd m6, r5d            ; y
 %ifidn %2, rv40
-%ifdef PIC
+%if PIC
     lea  r8, [rnd_rv40_2d_tbl]
 %endif
 %if ARCH_X86_64 == 0
@@ -283,7 +283,7 @@ cglobal %1_%2_chroma_mc8%3, 6, 7 + extra_regs, 0
 %macro chroma_mc4_mmx_func 2
 %define extra_regs 0
 %ifidn %2, rv40
-%ifdef PIC
+%if PIC
 %define extra_regs 1
 %endif ; PIC
 %endif ; rv40
@@ -301,7 +301,7 @@ cglobal %1_%2_chroma_mc4, 6, 6 + extra_regs, 0
 
     psubw m5, m3
 %ifidn %2, rv40
-%ifdef PIC
+%if PIC
     lea  r6, [rnd_rv40_2d_tbl]
 %define rnd_2d_rv40 r6
 %else
diff --git a/libavcodec/x86/h264_idct.asm b/libavcodec/x86/h264_idct.asm
index 1f86e51d82..b29ddde200 100644
--- a/libavcodec/x86/h264_idct.asm
+++ b/libavcodec/x86/h264_idct.asm
@@ -42,7 +42,7 @@ scan8_mem: db 4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
            db 6+11*8, 7+11*8, 6+12*8, 7+12*8
            db 4+13*8, 5+13*8, 4+14*8, 5+14*8
            db 6+13*8, 7+13*8, 6+14*8, 7+14*8
-%ifdef PIC
+%if PIC
 %define npicregs 1
 %define scan8 picregq
 %else
@@ -322,7 +322,7 @@ INIT_XMM sse2
 cglobal h264_idct8_add4_8, 5, 8 + npicregs, 10, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
     movsxdifnidn r3, r3d
     xor  r5, r5
-%ifdef PIC
+%if PIC
     lea  picregq, [scan8_mem]
 %endif
 .nextblock:
@@ -398,7 +398,7 @@ h264_idct_add8_mmx_plane:
 cglobal h264_idct_add8_422_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
 ; dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
     movsxdifnidn r3, r3d
-%ifdef PIC
+%if PIC
     lea  picregq, [scan8_mem]
 %endif
 %if ARCH_X86_64
diff --git a/libavcodec/x86/h264_intrapred.asm b/libavcodec/x86/h264_intrapred.asm
index ea46bc595d..a8a630dbe6 100644
--- a/libavcodec/x86/h264_intrapred.asm
+++ b/libavcodec/x86/h264_intrapred.asm
@@ -1311,10 +1311,7 @@ PRED8x8L_DOWN_RIGHT
 ;-----------------------------------------------------------------------------
 
 %macro PRED8x8L_VERTICAL_RIGHT 0
-cglobal pred8x8l_vertical_right_8, 4,5,7
-    ; manually spill XMM registers for Win64 because
-    ; the code here is initialized with INIT_MMX
-    WIN64_SPILL_XMM 7
+cglobal pred8x8l_vertical_right_8, 4,5,6
     sub  r0, r3
     lea  r4, [r0+r3*2]
     movq mm0, [r0+r3*1-8]
@@ -1384,7 +1381,6 @@ cglobal pred8x8l_vertical_right_8, 4,5,7
     movq2dq xmm4, mm6
     pslldq  xmm4, 8
     por     xmm0, xmm4
-    movdqa  xmm6, [pw_ff00]
     movdqa  xmm1, xmm0
     lea     r2, [r1+r3*2]
     movdqa  xmm2, xmm0
@@ -1394,15 +1390,16 @@ cglobal pred8x8l_vertical_right_8, 4,5,7
     pavgb   xmm2, xmm0
 INIT_XMM cpuname
     PRED4x4_LOWPASS xmm4, xmm3, xmm1, xmm0, xmm5
-    pandn    xmm6, xmm4
+    movdqa   xmm0, [pw_ff00]
+    pandn    xmm0, xmm4
     movdqa   xmm5, xmm4
     psrlw    xmm4, 8
-    packuswb xmm6, xmm4
-    movhlps  xmm4, xmm6
+    packuswb xmm0, xmm4
+    movhlps  xmm4, xmm0
     movhps   [r0+r3*2], xmm5
     movhps   [r0+r3*1], xmm2
     psrldq   xmm5, 4
-    movss    xmm5, xmm6
+    movss    xmm5, xmm0
     psrldq   xmm2, 4
     movss    xmm2, xmm4
     lea      r0, [r2+r3*2]
diff --git a/libavcodec/x86/hevc_mc.asm b/libavcodec/x86/hevc_mc.asm
index 5489701e44..b3b589b271 100644
--- a/libavcodec/x86/hevc_mc.asm
+++ b/libavcodec/x86/hevc_mc.asm
@@ -180,7 +180,7 @@ SECTION .text
 %macro EPEL_FILTER 5 ; bit depth, filter index, xmma, xmmb, gprtmp
 %if cpuflag(avx2)
 %assign %%offset 32
-%ifdef PIC
+%if PIC
     lea %5q, [hevc_epel_filters_avx2_%1]
     %define FILTER %5q
 %else
@@ -188,7 +188,7 @@ SECTION .text
 %endif
 %else
 %assign %%offset 16
-%ifdef PIC
+%if PIC
     lea %5q, [hevc_epel_filters_sse4_%1]
     %define FILTER %5q
 %else
@@ -216,7 +216,7 @@ SECTION .text
 %define %%table hevc_epel_filters_sse4_%1
 %endif
 
-%ifdef PIC
+%if PIC
     lea r3srcq, [%%table]
     %define FILTER r3srcq
 %else
@@ -234,7 +234,7 @@ SECTION .text
 %else
 %define %%table hevc_epel_filters_sse4_10
 %endif
 
-%ifdef PIC
+%if PIC
     lea r3srcq, [%%table]
     %define FILTER r3srcq
 %else
@@ -257,7 +257,7 @@ SECTION .text
 %define %%table hevc_qpel_filters_sse4_%1
 %endif
 
-%ifdef PIC
+%if PIC
     lea rfilterq, [%%table]
 %else
     %define rfilterq %%table
@@ -576,7 +576,7 @@ SECTION .text
 %define %%table hevc_qpel_filters_sse4_%2
 %endif
 
-%ifdef PIC
+%if PIC
     lea rfilterq, [%%table]
 %else
     %define rfilterq %%table
@@ -1288,7 +1288,7 @@ HEVC_PUT_HEVC_QPEL_HV 16, 10
 %assign %%offset 4
     dec %2q
     shl %2q, 3
-%ifdef PIC
+%if PIC
     lea %5q, [%%table]
     %define FILTER %5q
 %else
@@ -1365,7 +1365,7 @@ cglobal hevc_put_hevc_qpel_hv%1_%2, 6, 7, 27, dst, src, srcstride, height, mx, m
     sub myq, 1
     shl myq, 5
 %define %%table hevc_qpel_filters_avx512icl_v_%1
-%ifdef PIC
+%if PIC
     lea tmpq, [%%table]
     %define FILTER tmpq
 %else
diff --git a/libavcodec/x86/rv40dsp.asm b/libavcodec/x86/rv40dsp.asm
index e3c37dd297..dc520dbeb4 100644
--- a/libavcodec/x86/rv40dsp.asm
+++ b/libavcodec/x86/rv40dsp.asm
@@ -51,7 +51,7 @@ sixtap_filter_v_m: times 8 dw 1
                    times 8 dw 20
                    times 8 dw 52
 
-%ifdef PIC
+%if PIC
 %define sixtap_filter_hw picregq
 %define sixtap_filter_hb picregq
 %define sixtap_filter_v  picregq
@@ -84,7 +84,7 @@ SECTION .text
 %if WIN64
     movsxd %1q, %1d
 %endif
-%ifdef PIC
+%if PIC
     add %1q, picregq
 %else
     add %1q, %2
@@ -104,7 +104,7 @@ SECTION .text
 
 %macro FILTER_V 1
 cglobal %1_rv40_qpel_v, 6,6+npicregs,12, dst, dststride, src, srcstride, height, my, picreg
-%ifdef PIC
+%if PIC
     lea  picregq, [sixtap_filter_v_m]
 %endif
     pxor m7, m7
@@ -175,7 +175,7 @@ cglobal %1_rv40_qpel_v, 6,6+npicregs,12, dst, dststride, src, srcstride, height,
 
 %macro FILTER_H 1
 cglobal %1_rv40_qpel_h, 6, 6+npicregs, 12, dst, dststride, src, srcstride, height, mx, picreg
-%ifdef PIC
+%if PIC
     lea  picregq, [sixtap_filter_v_m]
 %endif
     pxor m7, m7
@@ -238,7 +238,7 @@ FILTER_V avg
 
 %macro FILTER_SSSE3 1
 cglobal %1_rv40_qpel_v, 6,6+npicregs,8, dst, dststride, src, srcstride, height, my, picreg
-%ifdef PIC
+%if PIC
     lea  picregq, [sixtap_filter_hb_m]
 %endif
 
@@ -283,7 +283,7 @@ cglobal %1_rv40_qpel_v, 6,6+npicregs,8, dst, dststride, src, srcstride, height,
     RET
 
 cglobal %1_rv40_qpel_h, 6,6+npicregs,8, dst, dststride, src, srcstride, height, mx, picreg
-%ifdef PIC
+%if PIC
     lea  picregq, [sixtap_filter_hb_m]
 %endif
     mova m3, [filter_h6_shuf2]
diff --git a/libavcodec/x86/sbrdsp.asm b/libavcodec/x86/sbrdsp.asm
index d02f70d704..63e9f0d33a 100644
--- a/libavcodec/x86/sbrdsp.asm
+++ b/libavcodec/x86/sbrdsp.asm
@@ -308,7 +308,7 @@ cglobal sbr_qmf_pre_shuffle, 1,4,6,z
     movq [r2q], m2
     RET
 
-%ifdef PIC
+%if PIC
 %define NREGS 1
 %if UNIX64
 %define NOISE_TABLE r6q ; r5q is m_max
@@ -321,7 +321,7 @@ cglobal sbr_qmf_pre_shuffle, 1,4,6,z
 %endif
 
 %macro LOAD_NST 1
-%ifdef PIC
+%if PIC
     lea  NOISE_TABLE, [%1]
     mova m0, [kxq + NOISE_TABLE]
 %else
@@ -371,7 +371,7 @@ apply_noise_main:
     movsxdifnidn noiseq, noised
     dec  noiseq
     shl  countd, 2
-%ifdef PIC
+%if PIC
     lea  NOISE_TABLE, [sbr_noise_table]
 %endif
     lea  Yq, [Yq + 2*countq]
diff --git a/libavcodec/x86/vp8dsp.asm b/libavcodec/x86/vp8dsp.asm
index 6ac5a7721b..231c21ea0d 100644
--- a/libavcodec/x86/vp8dsp.asm
+++ b/libavcodec/x86/vp8dsp.asm
@@ -114,7 +114,7 @@ bilinear_filter_vb_m: times 8 db 7, 1
                       times 8 db 2, 6
                       times 8 db 1, 7
 
-%ifdef PIC
+%if PIC
 %define fourtap_filter_hw picregq
 %define sixtap_filter_hw  picregq
 %define fourtap_filter_hb picregq
@@ -166,7 +166,7 @@ cglobal put_vp8_epel%1_h6, 6, 6 + npicregs, 8, dst, dststride, src, srcstride, h
     lea  mxd, [mxq*3]
     mova m3, [filter_h6_shuf2]
     mova m4, [filter_h6_shuf3]
-%ifdef PIC
+%if PIC
     lea  picregq, [sixtap_filter_hb_m]
 %endif
     mova m5, [sixtap_filter_hb+mxq*8-48] ; set up 6tap filter in bytes
@@ -207,7 +207,7 @@ cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, h
     mova m2, [pw_256]
     mova m3, [filter_h2_shuf]
     mova m4, [filter_h4_shuf]
-%ifdef PIC
+%if PIC
     lea  picregq, [fourtap_filter_hb_m]
 %endif
     mova m5, [fourtap_filter_hb+mxq-16] ; set up 4tap filter in bytes
@@ -234,7 +234,7 @@ cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, h
 
 cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
     shl  myd, 4
-%ifdef PIC
+%if PIC
     lea  picregq, [fourtap_filter_hb_m]
 %endif
     mova m5, [fourtap_filter_hb+myq-16]
@@ -272,7 +272,7 @@ cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picr
 
 cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
     lea  myd, [myq*3]
-%ifdef PIC
+%if PIC
     lea  picregq, [sixtap_filter_hb_m]
 %endif
     lea  myq, [sixtap_filter_hb+myq*8]
@@ -326,7 +326,7 @@ FILTER_SSSE3 8
 INIT_MMX mmxext
 cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
     shl  mxd, 4
-%ifdef PIC
+%if PIC
     lea  picregq, [fourtap_filter_hw_m]
 %endif
     movq mm4, [fourtap_filter_hw+mxq-16] ; set up 4tap filter in words
@@ -374,7 +374,7 @@ cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, he
 
 INIT_MMX mmxext
 cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
     lea  mxd, [mxq*3]
-%ifdef PIC
+%if PIC
     lea  picregq, [sixtap_filter_hw_m]
 %endif
     movq mm4, [sixtap_filter_hw+mxq*8-48] ; set up 4tap filter in words
@@ -431,7 +431,7 @@ cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, he
 
 INIT_XMM sse2
 cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, height, mx, picreg
     shl  mxd, 5
-%ifdef PIC
+%if PIC
     lea  picregq, [fourtap_filter_v_m]
 %endif
     lea  mxq, [fourtap_filter_v+mxq-32]
@@ -480,7 +480,7 @@ INIT_XMM sse2
 cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, height, mx, picreg
     lea  mxd, [mxq*3]
     shl  mxd, 4
-%ifdef PIC
+%if PIC
     lea  picregq, [sixtap_filter_v_m]
 %endif
     lea  mxq, [sixtap_filter_v+mxq-96]
@@ -543,7 +543,7 @@ cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, h
 ; 4x4 block, V-only 4-tap filter
 cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
     shl  myd, 5
-%ifdef PIC
+%if PIC
     lea  picregq, [fourtap_filter_v_m]
 %endif
     lea  myq, [fourtap_filter_v+myq-32]
@@ -597,7 +597,7 @@ cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picr
 cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
     shl  myd, 4
     lea  myq, [myq*3]
-%ifdef PIC
+%if PIC
     lea  picregq, [sixtap_filter_v_m]
 %endif
     lea  myq, [sixtap_filter_v+myq-96]
@@ -667,7 +667,7 @@ FILTER_V 8
 %if cpuflag(ssse3)
 cglobal put_vp8_bilinear%1_v, 7, 7, 5, dst, dststride, src, srcstride, height, picreg, my
     shl  myd, 4
-%ifdef PIC
+%if PIC
     lea  picregq, [bilinear_filter_vb_m]
 %endif
     pxor m4, m4
@@ -697,7 +697,7 @@ cglobal put_vp8_bilinear%1_v, 7, 7, 5, dst, dststride, src, srcstride, height, p
 %else ; cpuflag(ssse3)
 cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, picreg, my
     shl  myd, 4
-%ifdef PIC
+%if PIC
     lea  picregq, [bilinear_filter_vw_m]
 %endif
     pxor m6, m6
@@ -743,7 +743,7 @@ cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, p
 %if cpuflag(ssse3)
 cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride, height, mx, picreg
     shl  mxd, 4
-%ifdef PIC
+%if PIC
     lea  picregq, [bilinear_filter_vb_m]
 %endif
     pxor m4, m4
@@ -773,7 +773,7 @@ cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride
 %else ; cpuflag(ssse3)
 cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
     shl  mxd, 4
-%ifdef PIC
+%if PIC
     lea  picregq, [bilinear_filter_vw_m]
 %endif
     pxor m6, m6
diff --git a/libavcodec/x86/vp9itxfm.asm b/libavcodec/x86/vp9itxfm.asm
index 2c63fe514a..2f290f2f88 100644
--- a/libavcodec/x86/vp9itxfm.asm
+++ b/libavcodec/x86/vp9itxfm.asm
@@ -330,7 +330,9 @@ IDCT_4x4_FN ssse3
 INIT_MMX %5
 cglobal vp9_%1_%3_4x4_add, 3, 3, 0, dst, stride, block, eob
 %if WIN64 && notcpuflag(ssse3)
+INIT_XMM cpuname
     WIN64_SPILL_XMM 8
+INIT_MMX cpuname
 %endif
     movdqa xmm5, [pd_8192]
     mova   m0, [blockq+ 0]
diff --git a/libavcodec/x86/vp9itxfm_16bpp.asm b/libavcodec/x86/vp9itxfm_16bpp.asm
index 902685edf6..ebe6222285 100644
--- a/libavcodec/x86/vp9itxfm_16bpp.asm
+++ b/libavcodec/x86/vp9itxfm_16bpp.asm
@@ -303,7 +303,9 @@ IDCT4_10_FN
 %macro IADST4_FN 4
 cglobal vp9_%1_%3_4x4_add_10, 3, 3, 0, dst, stride, block, eob
 %if WIN64 && notcpuflag(ssse3)
+INIT_XMM cpuname
     WIN64_SPILL_XMM 8
+INIT_MMX cpuname
 %endif
     movdqa xmm5, [pd_8192]
     mova   m0, [blockq+0*16+0]
@@ -672,7 +674,7 @@ cglobal vp9_idct_idct_8x8_add_10, 4, 6 + ARCH_X86_64, 14, \
     mov    dstbakq, dstq
     movsxd cntq, cntd
 %endif
-%ifdef PIC
+%if PIC
     lea    ptrq, [default_8x8]
     movzx  cntd, byte [ptrq+cntq-1]
 %else
@@ -921,7 +923,7 @@ cglobal vp9_%1_%3_8x8_add_10, 4, 6 + ARCH_X86_64, 16, \
     mov    dstbakq, dstq
     movsxd cntq, cntd
 %endif
-%ifdef PIC
+%if PIC
     lea    ptrq, [%5_8x8]
     movzx  cntd, byte [ptrq+cntq-1]
 %else
@@ -1128,7 +1130,7 @@ cglobal vp9_idct_idct_16x16_add_10, 4, 6 + ARCH_X86_64, 16, \
     mov    dstbakq, dstq
     movsxd cntq, cntd
 %endif
-%ifdef PIC
+%if PIC
     lea    ptrq, [default_16x16]
     movzx  cntd, byte [ptrq+cntq-1]
 %else
@@ -1445,7 +1447,7 @@ cglobal vp9_%1_%4_16x16_add_10, 4, 6 + ARCH_X86_64, 16, \
     mov    dstbakq, dstq
     movsxd cntq, cntd
 %endif
-%ifdef PIC
+%if PIC
     lea    ptrq, [%7_16x16]
     movzx  cntd, byte [ptrq+cntq-1]
 %else
@@ -1958,7 +1960,7 @@ cglobal vp9_idct_idct_32x32_add_10, 4, 6 + ARCH_X86_64, 16, \
     mov    dstbakq, dstq
     movsxd cntq, cntd
 %endif
-%ifdef PIC
+%if PIC
     lea    ptrq, [default_32x32]
     movzx  cntd, byte [ptrq+cntq-1]
 %else
diff --git a/libavutil/x86/x86inc.asm b/libavutil/x86/x86inc.asm
index e099ee4b10..e61d924bc1 100644
--- a/libavutil/x86/x86inc.asm
+++ b/libavutil/x86/x86inc.asm
@@ -1,7 +1,7 @@
 ;*****************************************************************************
-;* x86inc.asm: x264asm abstraction layer
+;* x86inc.asm: x86 abstraction layer
 ;*****************************************************************************
-;* Copyright (C) 2005-2018 x264 project
+;* Copyright (C) 2005-2024 x264 project
 ;*
 ;* Authors: Loren Merritt <lorenm@u.washington.edu>
 ;*          Henrik Gramner <henrik@gramner.com>
@@ -21,21 +21,14 @@
 ;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 ;*****************************************************************************
 
-; This is a header file for the x264ASM assembly language, which uses
+; This is a header file for the x86inc.asm assembly language, which uses
 ; NASM/YASM syntax combined with a large number of macros to provide easy
 ; abstraction between different calling conventions (x86_32, win64, linux64).
 ; It also has various other useful features to simplify writing the kind of
-; DSP functions that are most often used in x264.
-
-; Unlike the rest of x264, this file is available under an ISC license, as it
-; has significant usefulness outside of x264 and we want it to be available
-; to the largest audience possible. Of course, if you modify it for your own
-; purposes to add a new feature, we strongly encourage contributing a patch
-; as this feature might be useful for others as well. Send patches or ideas
-; to x264-devel@videolan.org .
+; DSP functions that are most often used.
 
 %ifndef private_prefix
-    %define private_prefix x264
+    %error private_prefix not defined
 %endif
 
 %ifndef public_prefix
@@ -68,12 +61,19 @@
 %endif
 
 %define FORMAT_ELF 0
+%define FORMAT_MACHO 0
 %ifidn __OUTPUT_FORMAT__,elf
     %define FORMAT_ELF 1
 %elifidn __OUTPUT_FORMAT__,elf32
     %define FORMAT_ELF 1
 %elifidn __OUTPUT_FORMAT__,elf64
     %define FORMAT_ELF 1
+%elifidn __OUTPUT_FORMAT__,macho
+    %define FORMAT_MACHO 1
+%elifidn __OUTPUT_FORMAT__,macho32
+    %define FORMAT_MACHO 1
+%elifidn __OUTPUT_FORMAT__,macho64
+    %define FORMAT_MACHO 1
 %endif
 
 %ifdef PREFIX
@@ -82,6 +82,11 @@
     %define mangle(x) x
 %endif
 
+; Use VEX-encoding even in non-AVX functions
+%ifndef FORCE_VEX_ENCODING
+    %define FORCE_VEX_ENCODING 0
+%endif
+
 ; aout does not support align=
 ; NOTE: This section is out of sync with x264, in order to
 ; keep supporting OS/2.
@@ -99,28 +104,27 @@
     %endif
 %endmacro
 
-%if WIN64
-    %define PIC
-%elif ARCH_X86_64 == 0
-; x86_32 doesn't require PIC.
-; Some distros prefer shared objects to be PIC, but nothing breaks if
-; the code contains a few textrels, so we'll skip that complexity.
-    %undef PIC
-%endif
-%ifdef PIC
+%if ARCH_X86_64
+    %define PIC 1 ; always use PIC on x86-64
     default rel
+%elifidn __OUTPUT_FORMAT__,win32
+    %define PIC 0 ; PIC isn't used on 32-bit Windows
+%elifndef PIC
+    %define PIC 0
 %endif
 
-%macro CPUNOP 1
-    %if HAVE_CPUNOP
-        CPU %1
+%define HAVE_PRIVATE_EXTERN 1
+%ifdef __NASM_VERSION_ID__
+    %use smartalign
+    %if __NASM_VERSION_ID__ < 0x020e0000 ; 2.14
+        %define HAVE_PRIVATE_EXTERN 0
     %endif
-%endmacro
+%endif
 
 ; Macros to eliminate most code duplication between x86_32 and x86_64:
 ; Currently this works only for leaf functions which load all their arguments
 ; into registers at the start, and make no other use of the stack. Luckily that
-; covers most of x264's asm.
+; covers most use cases.
 
 ; PROLOGUE:
 ; %1 = number of arguments. loads them from stack if needed.
@@ -232,6 +236,18 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
     %define gprsize 4
 %endif
 
+%macro LEA 2
+%if ARCH_X86_64
+    lea %1, [%2]
+%elif PIC
+    call $+5 ; special-cased to not affect the RSB on most CPU:s
+    pop %1
+    add %1, (%2)-$+1
+%else
+    mov %1, %2
+%endif
+%endmacro
+
 ; Repeats an instruction/operation for multiple arguments.
 ; Example usage: "REPX {psrlw x, 8}, m0, m1, m2, m3"
 %macro REPX 2-* ; operation, args
@@ -303,6 +319,10 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
     %endif
 %endmacro
 
+%if ARCH_X86_64 == 0
+    %define movsxd movifnidn
+%endif
+
 %macro movsxdifnidn 2
     %ifnidn %1, %2
         movsxd %1, %2
@@ -354,7 +374,46 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
 %define vzeroupper_required (mmsize > 16 && (ARCH_X86_64 == 0 || xmm_regs_used > 16 || notcpuflag(avx512)))
 %define high_mm_regs (16*cpuflag(avx512))
 
-%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
+; Large stack allocations on Windows need to use stack probing in order
+; to guarantee that all stack memory is committed before accessing it.
+; This is done by ensuring that the guard page(s) at the end of the
+; currently committed pages are touched prior to any pages beyond that.
+%if WIN64
+    %assign STACK_PROBE_SIZE 8192
+%elifidn __OUTPUT_FORMAT__, win32
+    %assign STACK_PROBE_SIZE 4096
+%else
+    %assign STACK_PROBE_SIZE 0
+%endif
+
+%macro PROBE_STACK 1 ; stack_size
+    %if STACK_PROBE_SIZE
+        %assign %%i STACK_PROBE_SIZE
+        %rep %1 / STACK_PROBE_SIZE
+            mov eax, [rsp-%%i]
+            %assign %%i %%i+STACK_PROBE_SIZE
+        %endrep
+    %endif
+%endmacro
+
+%macro RESET_STACK_STATE 0
+    %ifidn rstk, rsp
+        %assign stack_offset stack_offset - stack_size_padded
+    %else
+        %xdefine rstk rsp
+    %endif
+    %assign stack_size 0
+    %assign stack_size_padded 0
+    %assign xmm_regs_used 0
+%endmacro
+
+%macro ALLOC_STACK 0-2 0, 0 ; stack_size, n_xmm_regs
+    RESET_STACK_STATE
+    %ifnum %2
+        %if mmsize != 8
+            %assign xmm_regs_used %2
+        %endif
+    %endif
     %ifnum %1
         %if %1 != 0
             %assign %%pad 0
@@ -364,16 +423,14 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
             %endif
             %if WIN64
                 %assign %%pad %%pad + 32 ; shadow space
-                %if mmsize != 8
-                    %assign xmm_regs_used %2
-                    %if xmm_regs_used > 8
-                        %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
-                    %endif
+                %if xmm_regs_used > 8
+                    %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
                 %endif
             %endif
             %if required_stack_alignment <= STACK_ALIGNMENT
                 ; maintain the current stack alignment
                 %assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+                PROBE_STACK stack_size_padded
                 SUB rsp, stack_size_padded
             %else
                 %assign %%reg_num (regs_used - 1)
@@ -389,6 +446,7 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
                 %xdefine rstkm rstk
             %endif
             %assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
+            PROBE_STACK stack_size_padded
             mov rstk, rsp
             and rsp, ~(required_stack_alignment-1)
             sub rsp, stack_size_padded
@@ -399,7 +457,7 @@
     %endif
 %endmacro
 
-%macro SETUP_STACK_POINTER 1
+%macro SETUP_STACK_POINTER 0-1 0
     %ifnum %1
         %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
             %if %1 > 0
@@ -462,35 +520,62 @@ DECLARE_REG 14, R13, 120
     %endif
 %endmacro
 
-%macro WIN64_PUSH_XMM 0
-    ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
-    %if xmm_regs_used > 6 + high_mm_regs
-        movaps [rstk + stack_offset +  8], xmm6
-    %endif
-    %if xmm_regs_used > 7 + high_mm_regs
-        movaps [rstk + stack_offset + 24], xmm7
-    %endif
-    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
-    %if %%xmm_regs_on_stack > 0
-        %assign %%i 8
-        %rep %%xmm_regs_on_stack
-            movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
-            %assign %%i %%i+1
-        %endrep
+; Push XMM registers to the stack. If no argument is specified all used register
+; will be pushed, otherwise only push previously unpushed registers.
+%macro WIN64_PUSH_XMM 0-2 ; new_xmm_regs_used, xmm_regs_pushed
+    %if mmsize != 8
+        %if %0 == 2
+            %assign %%pushed %2
+            %assign xmm_regs_used %1
+        %elif %0 == 1
+            %assign %%pushed xmm_regs_used
+            %assign xmm_regs_used %1
+        %else
+            %assign %%pushed 0
+        %endif
+        ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
+        %if %%pushed <= 6 + high_mm_regs && xmm_regs_used > 6 + high_mm_regs
+            movaps [rstk + stack_offset +  8], xmm6
+        %endif
+        %if %%pushed <= 7 + high_mm_regs && xmm_regs_used > 7 + high_mm_regs
+            movaps [rstk + stack_offset + 24], xmm7
+        %endif
+        %assign %%pushed %%pushed - high_mm_regs - 8
+        %if %%pushed < 0
+            %assign %%pushed 0
+        %endif
+        %assign %%regs_to_push xmm_regs_used - %%pushed - high_mm_regs - 8
+        %if %%regs_to_push > 0
+            ASSERT (%%regs_to_push + %%pushed) * 16 <= stack_size_padded - stack_size - 32
+            %assign %%i %%pushed + 8
+            %rep %%regs_to_push
+                movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
+                %assign %%i %%i+1
+            %endrep
+        %endif
    %endif
 %endmacro
 
-%macro WIN64_SPILL_XMM 1
-    %assign xmm_regs_used %1
-    ASSERT xmm_regs_used <= 16 + high_mm_regs
-    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
-    %if %%xmm_regs_on_stack > 0
-        ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
-        %assign %%pad %%xmm_regs_on_stack*16 + 32
-        %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
-        SUB rsp, stack_size_padded
+; Allocated stack space for XMM registers and push all, or a subset, of those
+%macro WIN64_SPILL_XMM 1-2 ; xmm_regs_used, xmm_regs_reserved
+    RESET_STACK_STATE
+    %if mmsize != 8
+        %assign xmm_regs_used %1
+        ASSERT xmm_regs_used <= 16 + high_mm_regs
+        %if %0 == 2
+            ASSERT %2 >= %1
+            %assign %%xmm_regs_on_stack %2 - high_mm_regs - 8
+        %else
+            %assign %%xmm_regs_on_stack %1 - high_mm_regs - 8
+        %endif
+        %if %%xmm_regs_on_stack > 0
+            ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
+            %assign %%pad %%xmm_regs_on_stack*16 + 32
+            %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+            SUB rsp, stack_size_padded
+        %endif
+        WIN64_PUSH_XMM
     %endif
-    WIN64_PUSH_XMM
 %endmacro
 
 %macro WIN64_RESTORE_XMM_INTERNAL 0
@@ -521,9 +606,7 @@ DECLARE_REG 14, R13, 120
 
 %macro WIN64_RESTORE_XMM 0
     WIN64_RESTORE_XMM_INTERNAL
-    %assign stack_offset (stack_offset-stack_size_padded)
-    %assign stack_size_padded 0
-    %assign xmm_regs_used 0
+    RESET_STACK_STATE
 %endmacro
 
 %define has_epilogue regs_used > 7 || stack_size > 0 || vzeroupper_required || xmm_regs_used > 6+high_mm_regs
@@ -558,12 +641,11 @@ DECLARE_REG 14, R13, 72
 
 %macro PROLOGUE 2-5+ 0, 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
     %assign num_args %1
     %assign regs_used %2
-    %assign xmm_regs_used %3
     ASSERT regs_used >= num_args
     SETUP_STACK_POINTER %4
     ASSERT regs_used <= 15
     PUSH_IF_USED 9, 10, 11, 12, 13, 14
-    ALLOC_STACK %4
+    ALLOC_STACK %4, %3
     LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
     %if %0 > 4
         %ifnum %4
@@ -627,7 +709,7 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
     SETUP_STACK_POINTER %4
     ASSERT regs_used <= 7
     PUSH_IF_USED 3, 4, 5, 6
-    ALLOC_STACK %4
+    ALLOC_STACK %4, %3
     LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
     %if %0 > 4
         %ifnum %4
@@ -660,11 +742,19 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
 %endif ;======================================================================
 
 %if WIN64 == 0
-    %macro WIN64_SPILL_XMM 1
+    %macro WIN64_SPILL_XMM 1-2
+        RESET_STACK_STATE
+        %if mmsize != 8
+            %assign xmm_regs_used %1
+        %endif
    %endmacro
    %macro WIN64_RESTORE_XMM 0
+        RESET_STACK_STATE
    %endmacro
-    %macro WIN64_PUSH_XMM 0
+    %macro WIN64_PUSH_XMM 0-2
+        %if mmsize != 8 && %0 >= 1
+            %assign xmm_regs_used %1
+        %endif
    %endmacro
 %endif
 
@@ -705,7 +795,7 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
 BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp
 
-%macro TAIL_CALL 2 ; callee, is_nonadjacent
+%macro TAIL_CALL 1-2 1 ; callee, is_nonadjacent
     %if has_epilogue
         call %1
         RET
@@ -735,22 +825,25 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
 %endmacro
 
 %macro cglobal_internal 2-3+
     annotate_function_size
-    %if %1
-        %xdefine %%FUNCTION_PREFIX private_prefix
-        %xdefine %%VISIBILITY hidden
-    %else
-        %xdefine %%FUNCTION_PREFIX public_prefix
-        %xdefine %%VISIBILITY
-    %endif
     %ifndef cglobaled_%2
-        %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
+        %if %1
+            %xdefine %2 mangle(private_prefix %+ _ %+ %2)
+        %else
+            %xdefine %2 mangle(public_prefix %+ _ %+ %2)
+        %endif
        %xdefine %2.skip_prologue %2 %+ .skip_prologue
        CAT_XDEFINE cglobaled_, %2, 1
     %endif
     %xdefine current_function %2
     %xdefine current_function_section __SECT__
     %if FORMAT_ELF
-        global %2:function %%VISIBILITY
+        %if %1
+            global %2:function hidden
+        %else
+            global %2:function
+        %endif
+    %elif FORMAT_MACHO && HAVE_PRIVATE_EXTERN && %1
+        global %2:private_extern
     %else
        global %2
     %endif
@@ -771,6 +864,8 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
 %macro cglobal_label 1
     %if FORMAT_ELF
         global current_function %+ %1:function hidden
+    %elif FORMAT_MACHO && HAVE_PRIVATE_EXTERN
+        global current_function %+ %1:private_extern
     %else
         global current_function %+ %1
     %endif
@@ -796,15 +891,34 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
     %xdefine %1 mangle(private_prefix %+ _ %+ %1)
     %if FORMAT_ELF
         global %1:data hidden
+    %elif FORMAT_MACHO && HAVE_PRIVATE_EXTERN
+        global %1:private_extern
     %else
         global %1
     %endif
     %1: %2
 %endmacro
 
-; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
 %if FORMAT_ELF
+    ; The GNU linker assumes the stack is executable by default.
     [SECTION .note.GNU-stack noalloc noexec nowrite progbits]
+
+    %ifdef __NASM_VERSION_ID__
+        %if __NASM_VERSION_ID__ >= 0x020e0300 ; 2.14.03
+            %if ARCH_X86_64
+                ; Control-flow Enforcement Technology (CET) properties.
+                [SECTION .note.gnu.property alloc noexec nowrite note align=gprsize]
+                dd 0x00000004  ; n_namesz
+                dd gprsize + 8 ; n_descsz
+                dd 0x00000005  ; n_type = NT_GNU_PROPERTY_TYPE_0
+                db "GNU",0     ; n_name
+                dd 0xc0000002  ; pr_type = GNU_PROPERTY_X86_FEATURE_1_AND
+                dd 0x00000004  ; pr_datasz
+                dd 0x00000002  ; pr_data = GNU_PROPERTY_X86_FEATURE_1_SHSTK
+                dd 0x00000000  ; pr_padding
+            %endif
+        %endif
+    %endif
 %endif
 
 ; Tell debuggers how large the function was.
@@ -828,32 +942,34 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
 
 ; cpuflags
 %assign cpuflags_mmx      (1<<0)
-%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
-%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
-%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
-%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
-%assign cpuflags_sse2     (1<<5) | cpuflags_sse
-%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
-%assign cpuflags_lzcnt    (1<<7) | cpuflags_sse2
-%assign cpuflags_sse3     (1<<8) | cpuflags_sse2
-%assign cpuflags_ssse3    (1<<9) | cpuflags_sse3
-%assign cpuflags_sse4     (1<<10)| cpuflags_ssse3
-%assign cpuflags_sse42    (1<<11)| cpuflags_sse4
-%assign cpuflags_aesni    (1<<12)| cpuflags_sse42
-%assign cpuflags_avx      (1<<13)| cpuflags_sse42
-%assign cpuflags_xop      (1<<14)| cpuflags_avx
-%assign cpuflags_fma4     (1<<15)| cpuflags_avx
-%assign cpuflags_fma3     (1<<16)| cpuflags_avx
-%assign cpuflags_bmi1     (1<<17)| cpuflags_avx|cpuflags_lzcnt
-%assign cpuflags_bmi2     (1<<18)| cpuflags_bmi1
-%assign cpuflags_avx2     (1<<19)| cpuflags_fma3|cpuflags_bmi2
-%assign cpuflags_avx512   (1<<20)| cpuflags_avx2 ; F, CD, BW, DQ, VL
-%assign cpuflags_avx512icl (1<<25)| cpuflags_avx512
-
-%assign cpuflags_cache32  (1<<21)
-%assign cpuflags_cache64  (1<<22)
-%assign cpuflags_aligned  (1<<23) ; not a cpu feature, but a function variant
-%assign cpuflags_atom     (1<<24)
+%assign cpuflags_mmx2      (1<<1)  | cpuflags_mmx
+%assign cpuflags_3dnow     (1<<2)  | cpuflags_mmx
+%assign cpuflags_3dnowext  (1<<3)  | cpuflags_3dnow
+%assign cpuflags_sse       (1<<4)  | cpuflags_mmx2
+%assign cpuflags_sse2      (1<<5)  | cpuflags_sse
+%assign cpuflags_sse2slow  (1<<6)  | cpuflags_sse2
+%assign cpuflags_lzcnt     (1<<7)  | cpuflags_sse2
+%assign cpuflags_sse3      (1<<8)  | cpuflags_sse2
+%assign cpuflags_ssse3     (1<<9)  | cpuflags_sse3
+%assign cpuflags_sse4      (1<<10) | cpuflags_ssse3
+%assign cpuflags_sse42     (1<<11) | cpuflags_sse4
+%assign cpuflags_aesni     (1<<12) | cpuflags_sse42
+%assign cpuflags_clmul     (1<<13) | cpuflags_sse42
+%assign cpuflags_gfni      (1<<14) | cpuflags_aesni|cpuflags_clmul
+%assign cpuflags_avx       (1<<15) | cpuflags_sse42
+%assign cpuflags_xop       (1<<16) | cpuflags_avx
+%assign cpuflags_fma4      (1<<17) | cpuflags_avx
+%assign cpuflags_fma3      (1<<18) | cpuflags_avx
+%assign cpuflags_bmi1      (1<<19) | cpuflags_avx|cpuflags_lzcnt
+%assign cpuflags_bmi2      (1<<20) | cpuflags_bmi1
+%assign cpuflags_avx2      (1<<21) | cpuflags_fma3|cpuflags_bmi2
+%assign cpuflags_avx512    (1<<22) | cpuflags_avx2 ; F, CD, BW, DQ, VL
+%assign cpuflags_avx512icl (1<<23) | cpuflags_avx512|cpuflags_gfni ; VNNI, IFMA, VBMI, VBMI2, VPOPCNTDQ, BITALG, VAES, VPCLMULQDQ
+
+%assign cpuflags_cache32   (1<<24)
+%assign cpuflags_cache64   (1<<25)
+%assign cpuflags_aligned   (1<<26) ; not a cpu feature, but a function variant
+%assign cpuflags_atom      (1<<27)
 
 ; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
 %define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
@@ -895,9 +1011,17 @@
     %endif
 
     %if ARCH_X86_64 || cpuflag(sse2)
-        CPUNOP amdnop
+        %ifdef __NASM_VERSION_ID__
+            ALIGNMODE p6
+        %else
+            CPU amdnop
+        %endif
     %else
-        CPUNOP basicnop
+        %ifdef __NASM_VERSION_ID__
+            ALIGNMODE nop
+        %else
+            CPU basicnop
+        %endif
     %endif
 %endmacro
 
@@ -971,7 +1095,7 @@
 %endmacro
 
 %macro INIT_XMM 0-1+
-    %assign avx_enabled 0
+    %assign avx_enabled FORCE_VEX_ENCODING
     %define RESET_MM_PERMUTATION INIT_XMM %1
     %define mmsize 16
     %define mova movdqa
@@ -983,6 +1107,9 @@
     %if WIN64
         AVX512_MM_PERMUTATION 6 ; Swap callee-saved registers with volatile registers
     %endif
+    %xdefine bcstw 1to8
+    %xdefine bcstd 1to4
+    %xdefine bcstq 1to2
 %endmacro
 
 %macro INIT_YMM 0-1+
@@ -996,6 +1123,9 @@
     INIT_CPUFLAGS %1
     DEFINE_MMREGS ymm
     AVX512_MM_PERMUTATION
+    %xdefine bcstw 1to16
+    %xdefine bcstd 1to8
+    %xdefine bcstq 1to4
 %endmacro
 
 %macro INIT_ZMM 0-1+
@@ -1009,6 +1139,9 @@
     INIT_CPUFLAGS %1
     DEFINE_MMREGS zmm
     AVX512_MM_PERMUTATION
+    %xdefine bcstw 1to32
+    %xdefine bcstd 1to16
+    %xdefine bcstq 1to8
 %endmacro
 
 INIT_XMM
@@ -1106,19 +1239,32 @@ INIT_XMM
     %endif
     %assign %%i 0
     %rep num_mmregs
-        CAT_XDEFINE %%f, %%i, m %+ %%i
+        %xdefine %%tmp m %+ %%i
+        CAT_XDEFINE %%f, %%i, regnumof %+ %%tmp
        %assign %%i %%i+1
     %endrep
 %endmacro
 
-%macro LOAD_MM_PERMUTATION 1 ; name to load from
-    %ifdef %1_m0
+%macro LOAD_MM_PERMUTATION 0-1 ; name to load from
+    %if %0
+        %xdefine %%f %1_m
+    %else
+        %xdefine %%f current_function %+ _m
+    %endif
+    %xdefine %%tmp %%f %+ 0
+    %ifnum %%tmp
+        DEFINE_MMREGS mmtype
        %assign %%i 0
        %rep num_mmregs
-            CAT_XDEFINE m, %%i, %1_m %+ %%i
-            CAT_XDEFINE nn, m %+ %%i, %%i
+            %xdefine %%tmp %%f %+ %%i
+            CAT_XDEFINE %%m, %%i, m %+ %%tmp
            %assign %%i %%i+1
        %endrep
+        %rep num_mmregs
+            %assign %%i %%i-1
+            CAT_XDEFINE m, %%i, %%m %+ %%i
+            CAT_XDEFINE nn, m %+ %%i, %%i
+        %endrep
     %endif
 %endmacro
 
@@ -1224,8 +1370,22 @@ INIT_XMM
     %ifdef cpuname
         %if notcpuflag(%2)
             %error use of ``%1'' %2 instruction in cpuname function: current_function
-        %elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8
+        %elif %3 == 0 && __sizeofreg == 16 && notcpuflag(sse2)
            %error use of ``%1'' sse2 instruction in cpuname function: current_function
+        %elif %3 == 0 && __sizeofreg == 32 && notcpuflag(avx2)
+            %error use of ``%1'' avx2 instruction in cpuname function: current_function
+        %elif __sizeofreg == 16 && notcpuflag(sse)
+            %error use of ``%1'' sse instruction in cpuname function: current_function
+        %elif __sizeofreg == 32 && notcpuflag(avx)
+            %error use of ``%1'' avx instruction in cpuname function: current_function
+        %elif __sizeofreg == 64 && notcpuflag(avx512)
+            %error use of ``%1'' avx512 instruction in cpuname function: current_function
+        %elifidn %1, pextrw ; special case because the base instruction is mmx2,
+            %ifnid %6       ; but sse4 is required for memory operands
+                %if notcpuflag(sse4)
+                    %error use of ``%1'' sse4 instruction in cpuname function: current_function
+                %endif
+            %endif
        %endif
     %endif
 %endif
@@ -1267,11 +1427,79 @@ INIT_XMM
             %1 %6, __src2
         %endif
     %elif %0 >= 9
-        __instr %6, %7, %8, %9
+        %if avx_enabled && __sizeofreg >= 16 && %4 == 1
+            %ifnnum regnumof%7
+                %if %3
+                    vmovaps %6, %7
+                %else
+                    vmovdqa %6, %7
+                %endif
+                __instr %6, %6, %8, %9
+            %else
+                __instr %6, %7, %8, %9
+            %endif
+        %else
+            __instr %6, %7, %8, %9
+        %endif
     %elif %0 == 8
-        __instr %6, %7, %8
+        %if avx_enabled && __sizeofreg >= 16 && %4 == 0
+            %xdefine __src1 %7
+            %xdefine __src2 %8
+            %if %5
+                %ifnum regnumof%7
+                    %ifnum regnumof%8
+                        %if regnumof%7 < 8 && regnumof%8 >= 8 && regnumof%8 < 16 && sizeof%8 <= 32
+                            ; Most VEX-encoded instructions require an additional byte to encode when
+                            ; src2 is a high register (e.g. m8..15). If the instruction is commutative
+                            ; we can swap src1 and src2 when doing so reduces the instruction length.
+                            %xdefine __src1 %8
+                            %xdefine __src2 %7
+                        %endif
+                    %endif
+                %elifnum regnumof%8 ; put memory operands in src2 when possible
+                    %xdefine __src1 %8
+                    %xdefine __src2 %7
+                %else
+                    %assign __emulate_avx 1
+                %endif
+            %elifnnum regnumof%7
+                ; EVEX allows imm8 shift instructions to be used with memory operands,
+                ; but VEX does not. This handles those special cases.
+                %ifnnum %8
+                    %assign __emulate_avx 1
+                %elif notcpuflag(avx512)
+                    %assign __emulate_avx 1
+                %endif
+            %endif
+            %if __emulate_avx ; a separate load is required
+                %if %3
+                    vmovaps %6, %7
+                %else
+                    vmovdqa %6, %7
+                %endif
+                __instr %6, %6, %8
+            %else
+                __instr %6, __src1, __src2
+            %endif
+        %else
+            __instr %6, %7, %8
+        %endif
     %elif %0 == 7
-        __instr %6, %7
+        %if avx_enabled && __sizeofreg >= 16 && %5
+            %xdefine __src1 %6
+            %xdefine __src2 %7
+            %ifnum regnumof%6
+                %ifnum regnumof%7
+                    %if regnumof%6 < 8 && regnumof%7 >= 8 && regnumof%7 < 16 && sizeof%7 <= 32
+                        %xdefine __src1 %7
+                        %xdefine __src2 %6
+                    %endif
+                %endif
+            %endif
+            __instr %6, __src1, __src2
+        %else
+            __instr %6, %7
+        %endif
     %else
         __instr %6
     %endif
@@ -1318,8 +1546,8 @@ AVX_INSTR andpd, sse2, 1, 0, 1
 AVX_INSTR andps, sse, 1, 0, 1
 AVX_INSTR blendpd, sse4, 1, 1, 0
 AVX_INSTR blendps, sse4, 1, 1, 0
-AVX_INSTR blendvpd, sse4 ; can't be emulated
-AVX_INSTR blendvps, sse4 ; can't be emulated
+AVX_INSTR blendvpd, sse4, 1, 1, 0 ; last operand must be xmm0 with legacy encoding
+AVX_INSTR blendvps, sse4, 1, 1, 0 ; last operand must be xmm0 with legacy encoding
 AVX_INSTR cmpeqpd, sse2, 1, 0, 1
 AVX_INSTR cmpeqps, sse, 1, 0, 1
 AVX_INSTR cmpeqsd, sse2, 1, 0, 0
@@ -1356,38 +1584,41 @@ AVX_INSTR cmpunordpd, sse2, 1, 0, 1
 AVX_INSTR cmpunordps, sse, 1, 0, 1
 AVX_INSTR cmpunordsd, sse2, 1, 0, 0
 AVX_INSTR cmpunordss, sse, 1, 0, 0
-AVX_INSTR comisd, sse2
-AVX_INSTR comiss, sse
-AVX_INSTR cvtdq2pd, sse2
-AVX_INSTR cvtdq2ps, sse2
-AVX_INSTR cvtpd2dq, sse2
-AVX_INSTR cvtpd2ps, sse2
-AVX_INSTR cvtps2dq, sse2
-AVX_INSTR cvtps2pd, sse2
-AVX_INSTR cvtsd2si, sse2
+AVX_INSTR comisd, sse2, 1
+AVX_INSTR comiss, sse, 1
+AVX_INSTR cvtdq2pd, sse2, 1
+AVX_INSTR cvtdq2ps, sse2, 1
+AVX_INSTR cvtpd2dq, sse2, 1
+AVX_INSTR cvtpd2ps, sse2, 1
+AVX_INSTR cvtps2dq, sse2, 1
+AVX_INSTR cvtps2pd, sse2, 1
+AVX_INSTR cvtsd2si, sse2, 1
 AVX_INSTR cvtsd2ss, sse2, 1, 0, 0
 AVX_INSTR cvtsi2sd, sse2, 1, 0, 0
 AVX_INSTR cvtsi2ss, sse, 1, 0, 0
 AVX_INSTR cvtss2sd, sse2, 1, 0, 0
-AVX_INSTR cvtss2si, sse
-AVX_INSTR cvttpd2dq, sse2
-AVX_INSTR cvttps2dq, sse2
-AVX_INSTR cvttsd2si, sse2
-AVX_INSTR cvttss2si, sse
+AVX_INSTR cvtss2si, sse, 1
+AVX_INSTR cvttpd2dq, sse2, 1
+AVX_INSTR cvttps2dq, sse2, 1
+AVX_INSTR cvttsd2si, sse2, 1
+AVX_INSTR cvttss2si, sse, 1
 AVX_INSTR divpd, sse2, 1, 0, 0
 AVX_INSTR divps, sse, 1, 0, 0
 AVX_INSTR divsd, sse2, 1, 0, 0
 AVX_INSTR divss, sse, 1, 0, 0
 AVX_INSTR dppd, sse4, 1, 1, 0
 AVX_INSTR dpps, sse4, 1, 1, 0
-AVX_INSTR extractps, sse4
+AVX_INSTR extractps, sse4, 1
+AVX_INSTR gf2p8affineinvqb, gfni, 0, 1, 0
+AVX_INSTR gf2p8affineqb, gfni, 0, 1, 0
+AVX_INSTR gf2p8mulb, gfni, 0, 0, 0
 AVX_INSTR haddpd, sse3, 1, 0, 0
 AVX_INSTR haddps, sse3, 1, 0, 0
 AVX_INSTR hsubpd, sse3, 1, 0, 0
 AVX_INSTR hsubps, sse3, 1, 0, 0
 AVX_INSTR insertps, sse4, 1, 1, 0
 AVX_INSTR lddqu, sse3
-AVX_INSTR ldmxcsr, sse
+AVX_INSTR ldmxcsr, sse, 1
 AVX_INSTR maskmovdqu, sse2
 AVX_INSTR maxpd, sse2, 1, 0, 1
 AVX_INSTR maxps, sse, 1, 0, 1
@@ -1397,10 +1628,10 @@ AVX_INSTR minpd, sse2, 1, 0, 1
 AVX_INSTR minps, sse, 1, 0, 1
 AVX_INSTR minsd, sse2, 1, 0, 0
 AVX_INSTR minss, sse, 1, 0, 0
-AVX_INSTR movapd, sse2
-AVX_INSTR movaps, sse
+AVX_INSTR movapd, sse2, 1
+AVX_INSTR movaps, sse, 1
 AVX_INSTR movd, mmx
-AVX_INSTR movddup, sse3
+AVX_INSTR movddup, sse3, 1
 AVX_INSTR movdqa, sse2
 AVX_INSTR movdqu, sse2
 AVX_INSTR movhlps, sse, 1, 0, 0
@@ -1409,19 +1640,19 @@ AVX_INSTR movhps, sse, 1, 0, 0
 AVX_INSTR movlhps, sse, 1, 0, 0
 AVX_INSTR movlpd, sse2, 1, 0, 0
 AVX_INSTR movlps, sse, 1, 0, 0
-AVX_INSTR movmskpd, sse2
-AVX_INSTR movmskps, sse
+AVX_INSTR movmskpd, sse2, 1
+AVX_INSTR movmskps, sse, 1
 AVX_INSTR movntdq, sse2
 AVX_INSTR movntdqa, sse4
-AVX_INSTR movntpd, sse2
-AVX_INSTR movntps, sse
+AVX_INSTR movntpd, sse2, 1
+AVX_INSTR movntps, sse, 1
 AVX_INSTR movq, mmx
 AVX_INSTR movsd, sse2, 1, 0, 0
-AVX_INSTR movshdup, sse3
-AVX_INSTR movsldup, sse3
+AVX_INSTR movshdup, sse3, 1
+AVX_INSTR movsldup, sse3, 1
 AVX_INSTR movss, sse, 1, 0, 0
-AVX_INSTR movupd, sse2
-AVX_INSTR movups, sse
+AVX_INSTR movupd, sse2, 1
+AVX_INSTR movups, sse, 1
 AVX_INSTR mpsadbw, sse4, 0, 1, 0
 AVX_INSTR mulpd, sse2, 1, 0, 1
 AVX_INSTR mulps, sse, 1, 0, 1
@@ -1432,90 +1663,90 @@ AVX_INSTR orps, sse, 1, 0, 1
 AVX_INSTR pabsb, ssse3
 AVX_INSTR pabsd, ssse3
 AVX_INSTR pabsw, ssse3
-AVX_INSTR packsswb, mmx, 0, 0, 0
 AVX_INSTR packssdw, mmx, 0, 0, 0
-AVX_INSTR packuswb, mmx, 0, 0, 0
+AVX_INSTR packsswb, mmx, 0, 0, 0
 AVX_INSTR packusdw, sse4, 0, 0, 0
+AVX_INSTR packuswb, mmx, 0, 0, 0
 AVX_INSTR paddb, mmx, 0, 0, 1
-AVX_INSTR paddw, mmx, 0, 0, 1
 AVX_INSTR paddd, mmx, 0, 0, 1
 AVX_INSTR paddq, sse2, 0, 0, 1
 AVX_INSTR paddsb, mmx, 0, 0, 1
 AVX_INSTR paddsw, mmx, 0, 0, 1
 AVX_INSTR paddusb, mmx, 0, 0, 1
 AVX_INSTR paddusw, mmx, 0, 0, 1
+AVX_INSTR paddw, mmx, 0, 0, 1
 AVX_INSTR palignr, ssse3, 0, 1, 0
 AVX_INSTR pand, mmx, 0, 0, 1
 AVX_INSTR pandn, mmx, 0, 0, 0
 AVX_INSTR pavgb, mmx2, 0, 0, 1
 AVX_INSTR pavgw, mmx2, 0, 0, 1
-AVX_INSTR pblendvb, sse4 ; can't be emulated
+AVX_INSTR pblendvb, sse4, 0, 1, 0 ; last operand must be xmm0 with legacy encoding
 AVX_INSTR pblendw, sse4, 0, 1, 0
-AVX_INSTR pclmulqdq, fnord, 0, 1, 0
-AVX_INSTR pclmulhqhqdq, fnord, 0, 0, 0
-AVX_INSTR pclmulhqlqdq, fnord, 0, 0, 0
-AVX_INSTR pclmullqhqdq, fnord, 0, 0, 0
-AVX_INSTR pclmullqlqdq, fnord, 0, 0, 0
-AVX_INSTR pcmpestri, sse42
-AVX_INSTR pcmpestrm, sse42
-AVX_INSTR pcmpistri, sse42
-AVX_INSTR pcmpistrm, sse42
+AVX_INSTR pclmulhqhqdq, clmul, 0, 0, 0
+AVX_INSTR pclmulhqlqdq, clmul, 0, 0, 0
+AVX_INSTR pclmullqhqdq, clmul, 0, 0, 0
+AVX_INSTR pclmullqlqdq, clmul, 0, 0, 0
+AVX_INSTR pclmulqdq, clmul, 0, 1, 0
 AVX_INSTR pcmpeqb, mmx, 0, 0, 1
-AVX_INSTR pcmpeqw, mmx, 0, 0, 1
 AVX_INSTR pcmpeqd, mmx, 0, 0, 1
 AVX_INSTR pcmpeqq, sse4, 0, 0, 1
+AVX_INSTR pcmpeqw, mmx, 0, 0, 1
+AVX_INSTR pcmpestri, sse42
+AVX_INSTR pcmpestrm, sse42
 AVX_INSTR pcmpgtb, mmx, 0, 0, 0
-AVX_INSTR pcmpgtw, mmx, 0, 0, 0
 AVX_INSTR pcmpgtd, mmx, 0, 0, 0
 AVX_INSTR pcmpgtq, sse42, 0, 0, 0
+AVX_INSTR pcmpgtw, mmx, 0, 0, 0
+AVX_INSTR pcmpistri, sse42
+AVX_INSTR pcmpistrm, sse42
 AVX_INSTR pextrb, sse4
 AVX_INSTR pextrd, sse4
 AVX_INSTR pextrq, sse4
 AVX_INSTR pextrw, mmx2
-AVX_INSTR phaddw, ssse3, 0, 0, 0
 AVX_INSTR phaddd, ssse3, 0, 0, 0
 AVX_INSTR phaddsw, ssse3, 0, 0, 0
+AVX_INSTR phaddw, ssse3, 0, 0, 0
 AVX_INSTR phminposuw, sse4
-AVX_INSTR phsubw, ssse3, 0, 0, 0
 AVX_INSTR phsubd, ssse3, 0, 0, 0
 AVX_INSTR phsubsw, ssse3, 0, 0, 0
+AVX_INSTR phsubw, ssse3, 0, 0, 0
 AVX_INSTR pinsrb, sse4, 0, 1, 0
 AVX_INSTR pinsrd, sse4, 0, 1, 0
 AVX_INSTR pinsrq, sse4, 0, 1, 0
 AVX_INSTR pinsrw, mmx2, 0, 1, 0
-AVX_INSTR pmaddwd, mmx, 0, 0, 1
 AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
+AVX_INSTR pmaddwd, mmx, 0, 0, 1
 AVX_INSTR pmaxsb, sse4, 0, 0, 1
-AVX_INSTR pmaxsw, mmx2, 0, 0, 1
 AVX_INSTR pmaxsd, sse4, 0, 0, 1
+AVX_INSTR pmaxsw, mmx2, 0, 0, 1
 AVX_INSTR pmaxub, mmx2, 0, 0, 1
-AVX_INSTR pmaxuw, sse4, 0, 0, 1
 AVX_INSTR pmaxud, sse4, 0, 0, 1
+AVX_INSTR pmaxuw, sse4, 0, 0, 1
 AVX_INSTR pminsb, sse4, 0, 0, 1
-AVX_INSTR pminsw, mmx2, 0, 0, 1
 AVX_INSTR pminsd, sse4, 0, 0, 1
+AVX_INSTR pminsw, mmx2, 0, 0, 1
 AVX_INSTR pminub, mmx2, 0, 0, 1
-AVX_INSTR pminuw, sse4, 0, 0, 1
 AVX_INSTR pminud, sse4, 0, 0, 1
+AVX_INSTR pminuw, sse4, 0, 0, 1
 AVX_INSTR pmovmskb, mmx2
-AVX_INSTR pmovsxbw, sse4
 AVX_INSTR pmovsxbd, sse4
 AVX_INSTR pmovsxbq, sse4
+AVX_INSTR pmovsxbw, sse4
+AVX_INSTR pmovsxdq, sse4
 AVX_INSTR pmovsxwd, sse4
 AVX_INSTR pmovsxwq, sse4
-AVX_INSTR pmovsxdq, sse4
-AVX_INSTR pmovzxbw, sse4
 AVX_INSTR pmovzxbd, sse4
 AVX_INSTR pmovzxbq, sse4
+AVX_INSTR pmovzxbw, sse4
+AVX_INSTR pmovzxdq, sse4
 AVX_INSTR pmovzxwd, sse4
 AVX_INSTR pmovzxwq, sse4
-AVX_INSTR pmovzxdq, sse4
 AVX_INSTR pmuldq, sse4, 0, 0, 1
 AVX_INSTR pmulhrsw, ssse3, 0, 0, 1
 AVX_INSTR pmulhuw, mmx2, 0, 0, 1
 AVX_INSTR pmulhw, mmx, 0, 0, 1
-AVX_INSTR pmullw, mmx, 0, 0, 1
 AVX_INSTR pmulld, sse4, 0, 0, 1
+AVX_INSTR pmullw, mmx, 0, 0, 1
 AVX_INSTR pmuludq, sse2, 0, 0, 1
 AVX_INSTR por, mmx, 0, 0, 1
 AVX_INSTR psadbw, mmx2, 0, 0, 1
@@ -1524,57 +1755,57 @@ AVX_INSTR pshufd, sse2
 AVX_INSTR pshufhw, sse2
 AVX_INSTR pshuflw, sse2
 AVX_INSTR psignb, ssse3, 0, 0, 0
-AVX_INSTR psignw, ssse3, 0, 0, 0
 AVX_INSTR psignd, ssse3, 0, 0, 0
-AVX_INSTR psllw, mmx, 0, 0, 0
+AVX_INSTR psignw, ssse3, 0, 0, 0
 AVX_INSTR pslld, mmx, 0, 0, 0
-AVX_INSTR psllq, mmx, 0, 0, 0
 AVX_INSTR pslldq, sse2, 0, 0, 0
-AVX_INSTR psraw, mmx, 0, 0, 0
+AVX_INSTR psllq, mmx, 0, 0, 0
+AVX_INSTR psllw, mmx, 0, 0, 0
 AVX_INSTR psrad, mmx, 0, 0, 0
-AVX_INSTR psrlw, mmx, 0, 0, 0
+AVX_INSTR psraw, mmx, 0, 0, 0
 AVX_INSTR psrld, mmx, 0, 0, 0
-AVX_INSTR psrlq, mmx, 0, 0, 0
 AVX_INSTR psrldq, sse2, 0, 0, 0
+AVX_INSTR psrlq, mmx, 0, 0, 0
+AVX_INSTR psrlw, mmx, 0, 0, 0
 AVX_INSTR psubb, mmx, 0, 0, 0
-AVX_INSTR psubw, mmx, 0, 0, 0
 AVX_INSTR psubd, mmx, 0, 0, 0
 AVX_INSTR psubq, sse2, 0, 0, 0
 AVX_INSTR psubsb, mmx, 0, 0, 0
 AVX_INSTR psubsw, mmx, 0, 0, 0
 AVX_INSTR psubusb, mmx, 0, 0, 0
 AVX_INSTR psubusw, mmx, 0, 0, 0
+AVX_INSTR psubw, mmx, 0, 0, 0
 AVX_INSTR ptest, sse4
 AVX_INSTR punpckhbw, mmx, 0, 0, 0
-AVX_INSTR punpckhwd, mmx, 0, 0, 0
 AVX_INSTR punpckhdq, mmx, 0, 0, 0
 AVX_INSTR punpckhqdq, sse2, 0, 0, 0
+AVX_INSTR punpckhwd, mmx, 0, 0, 0
 AVX_INSTR punpcklbw, mmx, 0, 0, 0
-AVX_INSTR punpcklwd, mmx, 0, 0, 0
 AVX_INSTR punpckldq, mmx, 0, 0, 0
 AVX_INSTR punpcklqdq, sse2, 0, 0, 0
+AVX_INSTR punpcklwd, mmx, 0, 0, 0
 AVX_INSTR pxor, mmx, 0, 0, 1
-AVX_INSTR rcpps, sse
+AVX_INSTR rcpps, sse, 1
 AVX_INSTR rcpss, sse, 1, 0, 0
-AVX_INSTR roundpd, sse4
-AVX_INSTR roundps, sse4
+AVX_INSTR roundpd, sse4, 1
+AVX_INSTR roundps, sse4, 1
 AVX_INSTR roundsd, sse4, 1, 1, 0
 AVX_INSTR roundss, sse4, 1, 1, 0
-AVX_INSTR rsqrtps, sse
+AVX_INSTR rsqrtps, sse, 1
 AVX_INSTR rsqrtss, sse, 1, 0, 0
 AVX_INSTR shufpd, sse2, 1, 1, 0
 AVX_INSTR shufps, sse, 1, 1, 0
-AVX_INSTR sqrtpd, sse2
-AVX_INSTR sqrtps, sse
+AVX_INSTR sqrtpd, sse2, 1
+AVX_INSTR sqrtps, sse, 1
 AVX_INSTR sqrtsd, sse2, 1, 0, 0
 AVX_INSTR sqrtss, sse, 1, 0, 0
-AVX_INSTR stmxcsr, sse
+AVX_INSTR stmxcsr, sse, 1
 AVX_INSTR subpd, sse2, 1, 0, 0
 AVX_INSTR subps, sse, 1, 0, 0
 AVX_INSTR subsd, sse2, 1, 0, 0
 AVX_INSTR subss, sse, 1, 0, 0
-AVX_INSTR ucomisd, sse2
-AVX_INSTR ucomiss, sse
+AVX_INSTR ucomisd, sse2, 1
+AVX_INSTR ucomiss, sse, 1
 AVX_INSTR unpckhpd, sse2, 1, 0, 0
 AVX_INSTR unpckhps, sse, 1, 0, 0
 AVX_INSTR unpcklpd, sse2, 1, 0, 0
@@ -1584,8 +1815,41 @@ AVX_INSTR xorps, sse, 1, 0, 1
 
 ; 3DNow instructions, for sharing code between AVX, SSE and 3DNow
 AVX_INSTR pfadd, 3dnow, 1, 0, 1
-AVX_INSTR pfsub, 3dnow, 1, 0, 0
 AVX_INSTR pfmul, 3dnow, 1, 0, 1
+AVX_INSTR pfsub, 3dnow, 1, 0, 0
+
+;%1 == instruction
+;%2 == minimal instruction set
+%macro GPR_INSTR 2
+    %macro %1 2-5 fnord, %1, %2
+        %ifdef cpuname
+            %if notcpuflag(%5)
+                %error use of ``%4'' %5 instruction in cpuname function: current_function
+            %endif
+        %endif
+        %ifidn %3, fnord
+            %4 %1, %2
+        %else
+            %4 %1, %2, %3
+        %endif
+    %endmacro
+%endmacro
+
+GPR_INSTR andn, bmi1
+GPR_INSTR bextr, bmi1
+GPR_INSTR blsi, bmi1
+GPR_INSTR blsmsk, bmi1
+GPR_INSTR blsr, bmi1
+GPR_INSTR bzhi, bmi2
+GPR_INSTR crc32, sse42
+GPR_INSTR mulx, bmi2
+GPR_INSTR pdep, bmi2
+GPR_INSTR pext, bmi2
+GPR_INSTR popcnt, sse42
+GPR_INSTR rorx, bmi2
+GPR_INSTR sarx, bmi2
+GPR_INSTR shlx, bmi2
+GPR_INSTR shrx, bmi2
 
 ; base-4 constants for shuffles
 %assign i 0
@@ -1618,15 +1882,11 @@ AVX_INSTR pfmul, 3dnow, 1, 0, 1
     %endmacro
 %endmacro
 
-FMA_INSTR pmacsww,  pmullw,  paddw
-FMA_INSTR pmacsdd,  pmulld,  paddd ; sse4 emulation
-FMA_INSTR pmacsdql, pmuldq,  paddq ; sse4 emulation
+FMA_INSTR pmacsdd,  pmulld,  paddd ; sse4 emulation
+FMA_INSTR pmacsdql, pmuldq,  paddq ; sse4 emulation
+FMA_INSTR pmacsww,  pmullw,  paddw
 FMA_INSTR pmadcswd, pmaddwd, paddd
 
-; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
-; This lets us use tzcnt without bumping the yasm version requirement yet.
-%define tzcnt rep bsf
-
 ; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax.
 ; FMA3 is only possible if dst is the same as one of the src registers.
 ; Either src2 or src3 can be a memory operand.
@@ -1687,6 +1947,11 @@ FMA4_INSTR fnmsub, pd, ps, sd, ss
             %assign %%evex_required 1
         %endif
     %endif
+    %ifnum regnumof%3
+        %if regnumof%3 >= 16 || sizeof%3 > 32
+            %assign %%evex_required 1
+        %endif
+    %endif
     %if %%evex_required
         %6 %%args
     %else
@@ -1711,16 +1976,3 @@ EVEX_INSTR vrcpps, vrcp14ps, 1 ; EVEX versions have higher precision
 EVEX_INSTR vrcpss, vrcp14ss, 1
 EVEX_INSTR vrsqrtps, vrsqrt14ps, 1
 EVEX_INSTR vrsqrtss, vrsqrt14ss, 1
-
-; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug (fixed in 1.3.0)
-%ifdef __YASM_VER__
-    %if __YASM_VERSION_ID__ < 0x01030000 && ARCH_X86_64 == 0
-        %macro vpbroadcastq 2
-            %if sizeof%1 == 16
-                movddup %1, %2
-            %else
-                vbroadcastsd %1, %2
-            %endif
-        %endmacro
-    %endif
-%endif
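
Beyond the PIC change, the updated x86inc.asm extends the WIN64 XMM spill API: WIN64_SPILL_XMM now takes an optional second argument that reserves stack space for more registers than are pushed immediately, and WIN64_PUSH_XMM can later push just the previously unpushed ones (the vp9itxfm hunks above rely on the related change that both macros are no-ops while mmsize == 8, hence the INIT_XMM cpuname / INIT_MMX cpuname bracketing). A hedged sketch of the two-argument form; the function name and register counts are made up:

    INIT_XMM sse2
    cglobal example, 2, 3, 0, dst, src
        WIN64_SPILL_XMM 8, 12 ; use xmm6-7 now, reserve stack for up to 12 regs
        ; ... code that only touches xmm0-xmm7 ...
        WIN64_PUSH_XMM 12, 8  ; grow to 12 regs; only xmm8-11 are pushed here
        ; ... code that also uses xmm8-xmm11 ...
        RET

On non-Windows targets both macros reduce to bookkeeping (they only track xmm_regs_used), so the same source assembles everywhere.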