#include "arm_arch.h"
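// AArch64 runtime capability probes and constant-time helpers.
// Each __armv*_probe below executes a single instruction from the extension
// it tests; the C-level caller is expected to run the probe with a SIGILL
// handler installed, so an undefined instruction marks the feature as absent.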
.text
.align 5
.globl __armv7_neon_probe
__armv7_neon_probe:
orr v15.16b, v15.16b, v15.16b // any Advanced SIMD instruction serves as the NEON probe
ret
.globl __armv7_tick
__armv7_tick:
#ifdef __APPLE__
mrs x0, CNTPCT_EL0 // physical counter-timer count
#else
mrs x0, CNTVCT_EL0 // virtual counter-timer count
#endif
ret
.globl __armv8_aes_probe
__armv8_aes_probe:
aese v0.16b, v0.16b // AES single-round encryption (Crypto extension)
ret
.globl __armv8_sha1_probe
__armv8_sha1_probe:
sha1h s0, s0 // SHA1 fixed rotate
ret
.globl __armv8_sha256_probe
__armv8_sha256_probe:
sha256su0 v0.4s, v0.4s // SHA256 schedule update 0
ret
.globl __armv8_pmull_probe
__armv8_pmull_probe:
pmull v0.1q, v0.1d, v0.1d // 64x64->128-bit polynomial multiply
ret
.globl __armv8_sha512_probe
__armv8_sha512_probe:
.long 0xcec08000 // sha512su0 v0.2d,v0.2d (emitted as .long for assemblers without SHA512 support)
ret
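// OPENSSL_cleanse(void *ptr, size_t len): zeroize a buffer. Written in
// assembly so the stores cannot be optimized away; short inputs are cleared
// byte-by-byte, longer ones are aligned and cleared eight bytes at a time.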
.globl _OPENSSL_cleanse
.align 5
_OPENSSL_cleanse:
cbz x1,Lret // len==0?
cmp x1,#15
b.hi Lot // len>15
nop
Little:
strb wzr,[x0],#1 // store byte-by-byte
subs x1,x1,#1
b.ne Little
Lret: ret
.align 4
Lot: tst x0,#7
b.eq Laligned // inp is aligned
strb wzr,[x0],#1 // store byte-by-byte
sub x1,x1,#1
b Lot
.align 4
Laligned:
str xzr,[x0],#8 // store word-by-word
sub x1,x1,#8
tst x1,#-8
b.ne Laligned // len>=8
cbnz x1,Little // len!=0?
ret
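// CRYPTO_memcmp(const void *a, const void *b, size_t len): constant-time
// comparison. Differences are OR-accumulated instead of exiting early, so
// timing depends only on len; returns 0 if equal, 1 otherwise.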
.globl _CRYPTO_memcmp
.align 4
_CRYPTO_memcmp:
eor w3,w3,w3 // difference accumulator = 0
cbz x2,Lno_data // len==0?
cmp x2,#16
b.ne Loop_cmp // fast path only for exactly 16 bytes
ldp x8,x9,[x0]
ldp x10,x11,[x1]
eor x8,x8,x10
eor x9,x9,x11
orr x8,x8,x9 // non-zero iff the two 16-byte blocks differ
mov x0,#1
cmp x8,#0
csel x0,xzr,x0,eq // branch-free select: 0 if equal, 1 otherwise
ret
.align 4
Loop_cmp:
ldrb w4,[x0],#1
ldrb w5,[x1],#1
eor w4,w4,w5
orr w3,w3,w4 // accumulate differences, no early exit
subs x2,x2,#1
b.ne Loop_cmp
Lno_data:
neg w0,w3
lsr w0,w0,#31 // 0 if all bytes matched, 1 otherwise
ret