#include "kyber512r3_consts_avx2.h"
// The small helper macros from the .inc files are inlined directly into this .S file
/*****.include "fq.inc"*****/
/***************************/
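// red16: Barrett-style reduction of the signed 16-bit lanes of %ymm\r.
// %ymm0 must hold q and %ymm1 the Barrett constant; %ymm\x is scratch.
// If rs is given, the rounding step uses vpmulhrsw with the constant in
// %ymm\rs; otherwise a plain arithmetic shift by 10 is used.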
.macro red16 r,rs=0,x=12
vpmulhw %ymm1,%ymm\r,%ymm\x
.if \rs
vpmulhrsw %ymm\rs,%ymm\x,%ymm\x
.else
vpsraw $10,%ymm\x,%ymm\x
.endif
vpmullw %ymm0,%ymm\x,%ymm\x
vpsubw %ymm\x,%ymm\r,%ymm\r
.endm
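// csubq: conditionally subtract q from each 16-bit lane of %ymm\r so the
// result lies in [0, q). %ymm0 must hold q; %ymm\x is scratch.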
.macro csubq r,x=12
vpsubw %ymm0,%ymm\r,%ymm\r
vpsraw $15,%ymm\r,%ymm\x
vpand %ymm0,%ymm\x,%ymm\x
vpaddw %ymm\x,%ymm\r,%ymm\r
.endm
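// caddq: add q to every 16-bit lane of %ymm\r that is negative.
// %ymm0 must hold q; %ymm\x is scratch.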
.macro caddq r,x=12
vpsraw $15,%ymm\r,%ymm\x
vpand %ymm0,%ymm\x,%ymm\x
vpaddw %ymm\x,%ymm\r,%ymm\r
.endm
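// fqmulprecomp: pointwise Montgomery multiplication of %ymm\b by a constant
// supplied as precomputed low (al) and high (ah) factors; the reduced product
// (scaled by 2^-16 mod q) is left in %ymm\b. %ymm0 must hold q; %ymm\x is scratch.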
.macro fqmulprecomp al,ah,b,x=12
vpmullw %ymm\al,%ymm\b,%ymm\x
vpmulhw %ymm\ah,%ymm\b,%ymm\b
vpmulhw %ymm0,%ymm\x,%ymm\x
vpsubw %ymm\x,%ymm\b,%ymm\b
.endm
/***************************/
/*****.include "shuffle.inc"*****/
/********************************/
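// Each shuffleN macro interleaves the N*16-bit blocks of registers r0 and r1:
// blocks at even positions go to r2, blocks at odd positions to r3
// (shuffle8 works on whole 128-bit lanes, shuffle4 on 64-bit quadwords,
// shuffle2 on 32-bit doublewords, shuffle1 on 16-bit words). Chained together
// they transpose the coefficient layout of eight ymm registers.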
.macro shuffle8 r0,r1,r2,r3
vperm2i128 $0x20,%ymm\r1,%ymm\r0,%ymm\r2
vperm2i128 $0x31,%ymm\r1,%ymm\r0,%ymm\r3
.endm
.macro shuffle4 r0,r1,r2,r3
vpunpcklqdq %ymm\r1,%ymm\r0,%ymm\r2
vpunpckhqdq %ymm\r1,%ymm\r0,%ymm\r3
.endm
.macro shuffle2 r0,r1,r2,r3
#vpsllq $32,%ymm\r1,%ymm\r2
vmovsldup %ymm\r1,%ymm\r2
vpblendd $0xAA,%ymm\r2,%ymm\r0,%ymm\r2
vpsrlq $32,%ymm\r0,%ymm\r0
#vmovshdup %ymm\r0,%ymm\r0
vpblendd $0xAA,%ymm\r1,%ymm\r0,%ymm\r3
.endm
.macro shuffle1 r0,r1,r2,r3
vpslld $16,%ymm\r1,%ymm\r2
vpblendw $0xAA,%ymm\r2,%ymm\r0,%ymm\r2
vpsrld $16,%ymm\r0,%ymm\r0
vpblendw $0xAA,%ymm\r1,%ymm\r0,%ymm\r3
.endm
/********************************/
.text
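// nttunpack128_avx: reorder one 128-coefficient half of a polynomial in place.
// Loads 256 bytes of 16-bit coefficients from (%rdi), transposes them with the
// shuffle macros between the sequential order and the permuted order used by
// the AVX2 NTT code, and stores them back to (%rdi).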
nttunpack128_avx:
#load
vmovdqa (%rdi),%ymm4
vmovdqa 32(%rdi),%ymm5
vmovdqa 64(%rdi),%ymm6
vmovdqa 96(%rdi),%ymm7
vmovdqa 128(%rdi),%ymm8
vmovdqa 160(%rdi),%ymm9
vmovdqa 192(%rdi),%ymm10
vmovdqa 224(%rdi),%ymm11
shuffle8 4,8,3,8
shuffle8 5,9,4,9
shuffle8 6,10,5,10
shuffle8 7,11,6,11
shuffle4 3,5,7,5
shuffle4 8,10,3,10
shuffle4 4,6,8,6
shuffle4 9,11,4,11
shuffle2 7,8,9,8
shuffle2 5,6,7,6
shuffle2 3,4,5,4
shuffle2 10,11,3,11
shuffle1 9,5,10,5
shuffle1 8,4,9,4
shuffle1 7,3,8,3
shuffle1 6,11,7,11
#store
vmovdqa %ymm10,(%rdi)
vmovdqa %ymm5,32(%rdi)
vmovdqa %ymm9,64(%rdi)
vmovdqa %ymm4,96(%rdi)
vmovdqa %ymm8,128(%rdi)
vmovdqa %ymm3,160(%rdi)
vmovdqa %ymm7,192(%rdi)
vmovdqa %ymm11,224(%rdi)
ret
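// nttunpack_avx2_asm(%rdi = polynomial of 256 16-bit coefficients):
// applies nttunpack128_avx to both 128-coefficient halves in place.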
.global cdecl(nttunpack_avx2_asm)
cdecl(nttunpack_avx2_asm):
call nttunpack128_avx
add $256,%rdi
call nttunpack128_avx
ret
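// ntttobytes128_avx: serialize 128 coefficients into 192 bytes.
// Loads 16-bit coefficients from (%rsi), reduces them to [0, q) with csubq
// (%ymm0 = q), packs each coefficient into 12 bits, reorders the result with
// the shuffle macros, and stores 192 bytes to (%rdi).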
ntttobytes128_avx:
#load
vmovdqa (%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vmovdqa 64(%rsi),%ymm7
vmovdqa 96(%rsi),%ymm8
vmovdqa 128(%rsi),%ymm9
vmovdqa 160(%rsi),%ymm10
vmovdqa 192(%rsi),%ymm11
vmovdqa 224(%rsi),%ymm12
#csubq
csubq 5,13
csubq 6,13
csubq 7,13
csubq 8,13
csubq 9,13
csubq 10,13
csubq 11,13
csubq 12,13
#bitpack
vpsllw $12,%ymm6,%ymm4
vpor %ymm4,%ymm5,%ymm4
vpsrlw $4,%ymm6,%ymm5
vpsllw $8,%ymm7,%ymm6
vpor %ymm5,%ymm6,%ymm5
vpsrlw $8,%ymm7,%ymm6
vpsllw $4,%ymm8,%ymm7
vpor %ymm6,%ymm7,%ymm6
vpsllw $12,%ymm10,%ymm7
vpor %ymm7,%ymm9,%ymm7
vpsrlw $4,%ymm10,%ymm8
vpsllw $8,%ymm11,%ymm9
vpor %ymm8,%ymm9,%ymm8
vpsrlw $8,%ymm11,%ymm9
vpsllw $4,%ymm12,%ymm10
vpor %ymm9,%ymm10,%ymm9
shuffle1 4,5,3,5
shuffle1 6,7,4,7
shuffle1 8,9,6,9
shuffle2 3,4,8,4
shuffle2 6,5,3,5
shuffle2 7,9,6,9
shuffle4 8,3,7,3
shuffle4 6,4,8,4
shuffle4 5,9,6,9
shuffle8 7,8,5,8
shuffle8 6,3,7,3
shuffle8 4,9,6,9
#store
vmovdqu %ymm5,(%rdi)
vmovdqu %ymm7,32(%rdi)
vmovdqu %ymm6,64(%rdi)
vmovdqu %ymm8,96(%rdi)
vmovdqu %ymm3,128(%rdi)
vmovdqu %ymm9,160(%rdi)
ret
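// ntttobytes_avx2_asm(%rdi = output byte array, %rsi = input polynomial,
// %rdx = constants table): serializes all 256 coefficients as 2 x 192 bytes.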
.global cdecl(ntttobytes_avx2_asm)
cdecl(ntttobytes_avx2_asm):
#consts
vmovdqa _16XQ*2(%rdx),%ymm0
call ntttobytes128_avx
add $256,%rsi
add $192,%rdi
call ntttobytes128_avx
ret
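// nttfrombytes128_avx: deserialize 192 bytes into 128 coefficients.
// Loads 192 bytes from (%rsi), reorders them with the shuffle macros, unpacks
// each 12-bit value into a 16-bit lane masked with %ymm0 (the 12-bit mask),
// and stores 256 bytes of coefficients to (%rdi).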
nttfrombytes128_avx:
#load
vmovdqu (%rsi),%ymm4
vmovdqu 32(%rsi),%ymm5
vmovdqu 64(%rsi),%ymm6
vmovdqu 96(%rsi),%ymm7
vmovdqu 128(%rsi),%ymm8
vmovdqu 160(%rsi),%ymm9
shuffle8 4,7,3,7
shuffle8 5,8,4,8
shuffle8 6,9,5,9
shuffle4 3,8,6,8
shuffle4 7,5,3,5
shuffle4 4,9,7,9
shuffle2 6,5,4,5
shuffle2 8,7,6,7
shuffle2 3,9,8,9
shuffle1 4,7,10,7
shuffle1 5,8,4,8
shuffle1 6,9,5,9
#bitunpack
vpsrlw $12,%ymm10,%ymm11
vpsllw $4,%ymm7,%ymm12
vpor %ymm11,%ymm12,%ymm11
vpand %ymm0,%ymm10,%ymm10
vpand %ymm0,%ymm11,%ymm11
vpsrlw $8,%ymm7,%ymm12
vpsllw $8,%ymm4,%ymm13
vpor %ymm12,%ymm13,%ymm12
vpand %ymm0,%ymm12,%ymm12
vpsrlw $4,%ymm4,%ymm13
vpand %ymm0,%ymm13,%ymm13
vpsrlw $12,%ymm8,%ymm14
vpsllw $4,%ymm5,%ymm15
vpor %ymm14,%ymm15,%ymm14
vpand %ymm0,%ymm8,%ymm8
vpand %ymm0,%ymm14,%ymm14
vpsrlw $8,%ymm5,%ymm15
vpsllw $8,%ymm9,%ymm1
vpor %ymm15,%ymm1,%ymm15
vpand %ymm0,%ymm15,%ymm15
vpsrlw $4,%ymm9,%ymm1
vpand %ymm0,%ymm1,%ymm1
#store
vmovdqa %ymm10,(%rdi)
vmovdqa %ymm11,32(%rdi)
vmovdqa %ymm12,64(%rdi)
vmovdqa %ymm13,96(%rdi)
vmovdqa %ymm8,128(%rdi)
vmovdqa %ymm14,160(%rdi)
vmovdqa %ymm15,192(%rdi)
vmovdqa %ymm1,224(%rdi)
ret
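// nttfrombytes_avx2_asm(%rdi = output polynomial, %rsi = input byte array,
// %rdx = constants table): deserializes all 256 coefficients from 2 x 192 bytes.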
.global cdecl(nttfrombytes_avx2_asm)
cdecl(nttfrombytes_avx2_asm):
#consts
vmovdqa _16XMASK*2(%rdx),%ymm0
call nttfrombytes128_avx
add $256,%rdi
add $192,%rsi
call nttfrombytes128_avx
ret