author    Maxim Yurchuk <maxim-yurchuk@ydb.tech>  2024-10-18 20:31:38 +0300
committer GitHub <noreply@github.com>  2024-10-18 20:31:38 +0300
commit    2a74bac2d2d3bccb4e10120f1ead805640ec9dd0 (patch)
tree      047e4818ced5aaf73f58517629e5260b5291f9f0 /contrib/libs/isa-l/erasure_code/gf_5vect_mad_avx512_gfni.asm
parent    2d9656823e9521d8c29ea4c9a1d0eab78391abfc (diff)
parent    3d834a1923bbf9403cd4a448e7f32b670aa4124f (diff)
Merge pull request #10502 from ydb-platform/mergelibs-241016-1210
Library import 241016-1210
Diffstat (limited to 'contrib/libs/isa-l/erasure_code/gf_5vect_mad_avx512_gfni.asm')
-rw-r--r--  contrib/libs/isa-l/erasure_code/gf_5vect_mad_avx512_gfni.asm  240
1 file changed, 240 insertions, 0 deletions
diff --git a/contrib/libs/isa-l/erasure_code/gf_5vect_mad_avx512_gfni.asm b/contrib/libs/isa-l/erasure_code/gf_5vect_mad_avx512_gfni.asm
new file mode 100644
index 0000000000..d89ecca970
--- /dev/null
+++ b/contrib/libs/isa-l/erasure_code/gf_5vect_mad_avx512_gfni.asm
@@ -0,0 +1,240 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2023 Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;;
+;;; gf_5vect_mad_avx512_gfni(len, vec, vec_i, mul_array, src, dest);
+;;;
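+;;; Arguments (following the usual isa-l gf_Nvect_mad convention):
+;;;   len       - length of each buffer in bytes
+;;;   vec       - number of source vectors (rows) in the code
+;;;   vec_i     - index of the single source vector being multiplied-and-added
+;;;   mul_array - coefficient tables from ec_init_tables(); for the GFNI
+;;;               variants each coefficient is an 8-byte affine matrix
+;;;   src       - pointer to the source data buffer
+;;;   dest      - array of 5 pointers to the destination (parity) buffers
+;;;
+;;; A sketch of the matching C prototype, assuming the standard
+;;; isa-l erasure_code.h declaration style:
+;;;   void gf_5vect_mad_avx512_gfni(int len, int vec, int vec_i,
+;;;                                 unsigned char *gftbls,
+;;;                                 unsigned char *src,
+;;;                                 unsigned char **dest);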
+
+%include "reg_sizes.asm"
+%include "gf_vect_gfni.inc"
+
+%if AS_FEATURE_LEVEL >= 10
+
+%ifidn __OUTPUT_FORMAT__, elf64
+ %define arg0 rdi
+ %define arg1 rsi
+ %define arg2 rdx
+ %define arg3 rcx
+ %define arg4 r8
+ %define arg5 r9
+ %define tmp r11
+ %define tmp2 r10
+ %define func(x) x: endbranch
+ %define FUNC_SAVE
+ %define FUNC_RESTORE
+%endif
+
+%ifidn __OUTPUT_FORMAT__, win64
+ %define arg0 rcx
+ %define arg1 rdx
+ %define arg2 r8
+ %define arg3 r9
+ %define arg4 r12
+ %define arg5 r13
+ %define tmp r11
+ %define tmp2 r10
+ %define stack_size 16*10 + 3*8
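+	; 10 xmm save slots (16 bytes each) plus r12/r13 and 8 bytes of
+	; padding so rsp stays 16-byte aligned for the vmovdqa saves below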
+ %define arg(x) [rsp + stack_size + 8 + 8*x]
+ %define func(x) proc_frame x
+
+%macro FUNC_SAVE 0
+ sub rsp, stack_size
+ vmovdqa [rsp + 16*0], xmm6
+ vmovdqa [rsp + 16*1], xmm7
+ vmovdqa [rsp + 16*2], xmm8
+ vmovdqa [rsp + 16*3], xmm9
+ vmovdqa [rsp + 16*4], xmm10
+ vmovdqa [rsp + 16*5], xmm11
+ vmovdqa [rsp + 16*6], xmm12
+ vmovdqa [rsp + 16*7], xmm13
+ vmovdqa [rsp + 16*8], xmm14
+ vmovdqa [rsp + 16*9], xmm15
+ mov [rsp + 10*16 + 0*8], r12
+ mov [rsp + 10*16 + 1*8], r13
+ end_prolog
+ mov arg4, arg(4)
+ mov arg5, arg(5)
+%endmacro
+
+%macro FUNC_RESTORE 0
+ vmovdqa xmm6, [rsp + 16*0]
+ vmovdqa xmm7, [rsp + 16*1]
+ vmovdqa xmm8, [rsp + 16*2]
+ vmovdqa xmm9, [rsp + 16*3]
+ vmovdqa xmm10, [rsp + 16*4]
+ vmovdqa xmm11, [rsp + 16*5]
+ vmovdqa xmm12, [rsp + 16*6]
+ vmovdqa xmm13, [rsp + 16*7]
+ vmovdqa xmm14, [rsp + 16*8]
+ vmovdqa xmm15, [rsp + 16*9]
+ mov r12, [rsp + 10*16 + 0*8]
+ mov r13, [rsp + 10*16 + 1*8]
+ add rsp, stack_size
+%endmacro
+%endif
+
+%define len arg0
+%define vec arg1
+%define vec_i arg2
+%define mul_array arg3
+%define src arg4
+%define dest1 arg5
+%define pos rax
+%define dest2 tmp2
+%define dest3 mul_array
+%define dest4 vec
+%define dest5 vec_i
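+; dest3..dest5 reuse the mul_array, vec and vec_i registers: those values
+; are dead once the coefficient broadcasts below have consumed them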
+
+%ifndef EC_ALIGNED_ADDR
+;;; Use unaligned load/store
+ %define XLDR vmovdqu8
+ %define XSTR vmovdqu8
+%else
+;;; Use non-temporal load/store
+ %ifdef NO_NT_LDST
+ %define XLDR vmovdqa64
+ %define XSTR vmovdqa64
+ %else
+ %define XLDR vmovntdqa
+ %define XSTR vmovntdq
+ %endif
+%endif
+
+default rel
+[bits 64]
+section .text
+
+%define x0 zmm0
+%define xd1 zmm1
+%define xd2 zmm2
+%define xd3 zmm3
+%define xd4 zmm4
+%define xd5 zmm5
+
+%define xgft1 zmm6
+%define xgft2 zmm7
+%define xgft3 zmm8
+%define xgft4 zmm9
+%define xgft5 zmm10
+
+%define xret1 zmm11
+%define xret2 zmm12
+%define xret3 zmm13
+%define xret4 zmm14
+%define xret5 zmm15
+
+;;
+;; Encodes 64 bytes of a single source into 5x 64 bytes (parity disks)
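+;; With an optional k-mask argument, the same flow is run with masked
+;; loads/stores to handle a trailing block shorter than 64 bytes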
+;;
+%macro ENCODE_64B_5 0-1
+%define %%KMASK %1
+
+%if %0 == 1
+ vmovdqu8 x0{%%KMASK}, [src + pos] ;Get next source vector
+ vmovdqu8 xd1{%%KMASK}, [dest1 + pos] ;Get next dest vector
+ vmovdqu8 xd2{%%KMASK}, [dest2 + pos] ;Get next dest vector
+ vmovdqu8 xd3{%%KMASK}, [dest3 + pos] ;Get next dest vector
+ vmovdqu8 xd4{%%KMASK}, [dest4 + pos] ;Get next dest vector
+ vmovdqu8 xd5{%%KMASK}, [dest5 + pos] ;Get next dest vector
+%else
+ XLDR x0, [src + pos] ;Get next source vector
+ XLDR xd1, [dest1 + pos] ;Get next dest vector
+ XLDR xd2, [dest2 + pos] ;Get next dest vector
+ XLDR xd3, [dest3 + pos] ;Get next dest vector
+ XLDR xd4, [dest4 + pos] ;Get next dest vector
+ XLDR xd5, [dest5 + pos] ;Get next dest vector
+%endif
+
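+	; GF_MUL_XOR (gf_vect_gfni.inc) computes xdN ^= x0 * xgftN in GF(2^8)
+	; for all five parity streams using EVEX-encoded VGF2P8AFFINEQB;
+	; the xretN registers hold the intermediate products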
+ GF_MUL_XOR EVEX, x0, xgft1, xret1, xd1, xgft2, xret2, xd2, xgft3, xret3, xd3, \
+ xgft4, xret4, xd4, xgft5, xret5, xd5
+
+%if %0 == 1
+ vmovdqu8 [dest1 + pos]{%%KMASK}, xd1
+ vmovdqu8 [dest2 + pos]{%%KMASK}, xd2
+ vmovdqu8 [dest3 + pos]{%%KMASK}, xd3
+ vmovdqu8 [dest4 + pos]{%%KMASK}, xd4
+ vmovdqu8 [dest5 + pos]{%%KMASK}, xd5
+%else
+ XSTR [dest1 + pos], xd1
+ XSTR [dest2 + pos], xd2
+ XSTR [dest3 + pos], xd3
+ XSTR [dest4 + pos], xd4
+ XSTR [dest5 + pos], xd5
+%endif
+%endmacro
+
+align 16
+global gf_5vect_mad_avx512_gfni, function
+func(gf_5vect_mad_avx512_gfni)
+ FUNC_SAVE
+
+ xor pos, pos
+ shl vec_i, 3 ;Multiply by 8
+ shl vec, 3 ;Multiply by 8
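+	; (each GFNI coefficient is an 8-byte GF(2^8) affine matrix, hence *8)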
+ lea tmp, [mul_array + vec_i]
+ vbroadcastf32x2 xgft1, [tmp]
+ vbroadcastf32x2 xgft2, [tmp + vec]
+ vbroadcastf32x2 xgft3, [tmp + vec*2]
+ vbroadcastf32x2 xgft5, [tmp + vec*4]
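+	; x86 addressing has no *3 scale, so advance tmp by vec first;
+	; [tmp + vec*2] below then equals mul_array + vec_i + vec*3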
+ add tmp, vec
+ vbroadcastf32x2 xgft4, [tmp + vec*2]
+ mov dest2, [dest1 + 8]
+ mov dest3, [dest1 + 2*8] ; reuse mul_array
+ mov dest4, [dest1 + 3*8] ; reuse vec
+ mov dest5, [dest1 + 4*8] ; reuse vec_i
+ mov dest1, [dest1]
+
+ cmp len, 64
+ jl .len_lt_64
+.loop64:
+ ENCODE_64B_5
+
+ add pos, 64 ;Loop on 64 bytes at a time
+ sub len, 64
+ cmp len, 64
+ jge .loop64
+
+.len_lt_64:
+ cmp len, 0
+ jle .exit
+
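+	; build a byte mask with the low len bits set: tmp = (1 << len) - 1
+	; (len < 64 here, so bit len is within a 64-bit register)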
+ xor tmp, tmp
+ bts tmp, len
+ dec tmp
+ kmovq k1, tmp
+
+ ENCODE_64B_5 k1
+
+.exit:
+ vzeroupper
+
+ FUNC_RESTORE
+ ret
+
+endproc_frame
+%endif ; if AS_FEATURE_LEVEL >= 10