author    | tpashkin <tpashkin@yandex-team.ru>           | 2022-02-10 16:46:41 +0300
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:46:41 +0300
commit    | 5475379a04e37df30085bd1724f1c57e3f40996f (patch)
tree      | 95d77e29785a3bd5be6260b1c9d226a551376ecf /contrib/libs/openssl/asm
parent    | c3d34b9b40eb534dfd2c549342274f3d61844688 (diff)
download  | ydb-5475379a04e37df30085bd1724f1c57e3f40996f.tar.gz
Restoring authorship annotation for <tpashkin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/openssl/asm')
16 files changed, 415 insertions(+), 415 deletions(-)
diff --git a/contrib/libs/openssl/asm/linux/crypto/aes/aesni-sha1-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/aes/aesni-sha1-x86_64.s
index a38e21f048..b383f1689d 100644
--- a/contrib/libs/openssl/asm/linux/crypto/aes/aesni-sha1-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/aes/aesni-sha1-x86_64.s
@@ -5,7 +5,7 @@
.type aesni_cbc_sha1_enc,@function
.align 32
aesni_cbc_sha1_enc:
-.cfi_startproc
+.cfi_startproc
movl OPENSSL_ia32cap_P+0(%rip),%r10d
movq OPENSSL_ia32cap_P+4(%rip),%r11
@@ -18,7 +18,7 @@ aesni_cbc_sha1_enc:
je aesni_cbc_sha1_enc_avx
jmp aesni_cbc_sha1_enc_ssse3
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
.type aesni_cbc_sha1_enc_ssse3,@function
.align 32
@@ -2732,7 +2732,7 @@ K_XX_XX:
.type aesni_cbc_sha1_enc_shaext,@function
.align 32
aesni_cbc_sha1_enc_shaext:
-.cfi_startproc
+.cfi_startproc
movq 8(%rsp),%r10
movdqu (%r9),%xmm8
movd 16(%r9),%xmm9
@@ -3031,5 +3031,5 @@ aesni_cbc_sha1_enc_shaext:
movdqu %xmm8,(%r9)
movd %xmm9,16(%r9)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext
diff --git a/contrib/libs/openssl/asm/linux/crypto/aes/aesni-sha256-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/aes/aesni-sha256-x86_64.s
index 3e56a82578..0b9041cc45 100644
--- a/contrib/libs/openssl/asm/linux/crypto/aes/aesni-sha256-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/aes/aesni-sha256-x86_64.s
@@ -5,7 +5,7 @@
.type aesni_cbc_sha256_enc,@function
.align 16
aesni_cbc_sha256_enc:
-.cfi_startproc
+.cfi_startproc
leaq OPENSSL_ia32cap_P(%rip),%r11
movl $1,%eax
cmpq $0,%rdi
@@ -31,7 +31,7 @@ aesni_cbc_sha256_enc:
ud2
.Lprobe:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size aesni_cbc_sha256_enc,.-aesni_cbc_sha256_enc
.align 64
@@ -2530,15 +2530,15 @@ aesni_cbc_sha256_enc_avx2:
vmovdqa %ymm4,0(%rsp)
xorl %r14d,%r14d
vmovdqa %ymm5,32(%rsp)
-
- movq 120(%rsp),%rsi
-.cfi_def_cfa %rsi,8
+
+ movq 120(%rsp),%rsi
+.cfi_def_cfa %rsi,8
leaq -64(%rsp),%rsp
-
-
-
- movq %rsi,-8(%rsp)
-.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
+
+
+
+ movq %rsi,-8(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
movl %ebx,%esi
vmovdqa %ymm6,0(%rsp)
xorl %ecx,%esi
@@ -2552,12 +2552,12 @@ aesni_cbc_sha256_enc_avx2:
vmovdqu (%r13),%xmm9
vpinsrq $0,%r13,%xmm15,%xmm15
leaq -64(%rsp),%rsp
-.cfi_escape 0x0f,0x05,0x77,0x38,0x06,0x23,0x08
-
- pushq 64-8(%rsp)
-.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
- leaq 8(%rsp),%rsp
-.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
+.cfi_escape 0x0f,0x05,0x77,0x38,0x06,0x23,0x08
+
+ pushq 64-8(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
+ leaq 8(%rsp),%rsp
+.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
vpalignr $4,%ymm0,%ymm1,%ymm4
addl 0+128(%rsp),%r11d
andl %r8d,%r12d
@@ -2832,12 +2832,12 @@ aesni_cbc_sha256_enc_avx2:
movl %r9d,%r12d
vmovdqa %ymm6,32(%rsp)
leaq -64(%rsp),%rsp
-.cfi_escape 0x0f,0x05,0x77,0x38,0x06,0x23,0x08
-
- pushq 64-8(%rsp)
-.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
- leaq 8(%rsp),%rsp
-.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
+.cfi_escape 0x0f,0x05,0x77,0x38,0x06,0x23,0x08
+
+ pushq 64-8(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
+ leaq 8(%rsp),%rsp
+.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
vpalignr $4,%ymm2,%ymm3,%ymm4
addl 0+128(%rsp),%r11d
andl %r8d,%r12d
@@ -4051,12 +4051,12 @@ aesni_cbc_sha256_enc_avx2:
jbe .Loop_avx2
leaq (%rsp),%rbp
-
-.cfi_escape 0x0f,0x06,0x76,0xf8,0x00,0x06,0x23,0x08
-
+
+.cfi_escape 0x0f,0x06,0x76,0xf8,0x00,0x06,0x23,0x08
+
.Ldone_avx2:
- movq 64+32(%rbp),%r8
- movq 64+56(%rbp),%rsi
+ movq 64+32(%rbp),%r8
+ movq 64+56(%rbp),%rsi
.cfi_def_cfa %rsi,8
vmovdqu %xmm8,(%r8)
vzeroall
@@ -4081,7 +4081,7 @@ aesni_cbc_sha256_enc_avx2:
.type aesni_cbc_sha256_enc_shaext,@function
.align 32
aesni_cbc_sha256_enc_shaext:
-.cfi_startproc
+.cfi_startproc
movq 8(%rsp),%r10
leaq K256+128(%rip),%rax
movdqu (%r9),%xmm1
@@ -4431,5 +4431,5 @@ aesni_cbc_sha256_enc_shaext:
movdqu %xmm1,(%r9)
movdqu %xmm2,16(%r9)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size aesni_cbc_sha256_enc_shaext,.-aesni_cbc_sha256_enc_shaext
diff --git a/contrib/libs/openssl/asm/linux/crypto/aes/aesni-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/aes/aesni-x86_64.s
index 1a4b22e7b8..5f25c1c81f 100644
--- a/contrib/libs/openssl/asm/linux/crypto/aes/aesni-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/aes/aesni-x86_64.s
@@ -861,7 +861,7 @@ aesni_ecb_encrypt:
.type aesni_ccm64_encrypt_blocks,@function
.align 16
aesni_ccm64_encrypt_blocks:
-.cfi_startproc
+.cfi_startproc
movl 240(%rcx),%eax
movdqu (%r8),%xmm6
movdqa .Lincrement64(%rip),%xmm9
@@ -920,13 +920,13 @@ aesni_ccm64_encrypt_blocks:
pxor %xmm8,%xmm8
pxor %xmm6,%xmm6
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
.globl aesni_ccm64_decrypt_blocks
.type aesni_ccm64_decrypt_blocks,@function
.align 16
aesni_ccm64_decrypt_blocks:
-.cfi_startproc
+.cfi_startproc
movl 240(%rcx),%eax
movups (%r8),%xmm6
movdqu (%r9),%xmm3
@@ -1019,7 +1019,7 @@ aesni_ccm64_decrypt_blocks:
pxor %xmm8,%xmm8
pxor %xmm6,%xmm6
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
.globl aesni_ctr32_encrypt_blocks
.type aesni_ctr32_encrypt_blocks,@function
@@ -2794,7 +2794,7 @@ aesni_ocb_encrypt:
.type __ocb_encrypt6,@function
.align 32
__ocb_encrypt6:
-.cfi_startproc
+.cfi_startproc
pxor %xmm9,%xmm15
movdqu (%rbx,%r12,1),%xmm11
movdqa %xmm10,%xmm12
@@ -2892,13 +2892,13 @@ __ocb_encrypt6:
.byte 102,65,15,56,221,246
.byte 102,65,15,56,221,255
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __ocb_encrypt6,.-__ocb_encrypt6
.type __ocb_encrypt4,@function
.align 32
__ocb_encrypt4:
-.cfi_startproc
+.cfi_startproc
pxor %xmm9,%xmm15
movdqu (%rbx,%r12,1),%xmm11
movdqa %xmm10,%xmm12
@@ -2963,13 +2963,13 @@ __ocb_encrypt4:
.byte 102,65,15,56,221,228
.byte 102,65,15,56,221,237
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __ocb_encrypt4,.-__ocb_encrypt4
.type __ocb_encrypt1,@function
.align 32
__ocb_encrypt1:
-.cfi_startproc
+.cfi_startproc
pxor %xmm15,%xmm7
pxor %xmm9,%xmm7
pxor %xmm2,%xmm8
@@ -3000,7 +3000,7 @@ __ocb_encrypt1:
.byte 102,15,56,221,215
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __ocb_encrypt1,.-__ocb_encrypt1
.globl aesni_ocb_decrypt
@@ -3243,7 +3243,7 @@ aesni_ocb_decrypt:
.type __ocb_decrypt6,@function
.align 32
__ocb_decrypt6:
-.cfi_startproc
+.cfi_startproc
pxor %xmm9,%xmm15
movdqu (%rbx,%r12,1),%xmm11
movdqa %xmm10,%xmm12
@@ -3335,13 +3335,13 @@ __ocb_decrypt6:
.byte 102,65,15,56,223,246
.byte 102,65,15,56,223,255
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __ocb_decrypt6,.-__ocb_decrypt6
.type __ocb_decrypt4,@function
.align 32
__ocb_decrypt4:
-.cfi_startproc
+.cfi_startproc
pxor %xmm9,%xmm15
movdqu (%rbx,%r12,1),%xmm11
movdqa %xmm10,%xmm12
@@ -3402,13 +3402,13 @@ __ocb_decrypt4:
.byte 102,65,15,56,223,228
.byte 102,65,15,56,223,237
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __ocb_decrypt4,.-__ocb_decrypt4
.type __ocb_decrypt1,@function
.align 32
__ocb_decrypt1:
-.cfi_startproc
+.cfi_startproc
pxor %xmm15,%xmm7
pxor %xmm9,%xmm7
pxor %xmm7,%xmm2
@@ -3438,7 +3438,7 @@ __ocb_decrypt1:
.byte 102,15,56,223,215
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __ocb_decrypt1,.-__ocb_decrypt1
.globl aesni_cbc_encrypt
.type aesni_cbc_encrypt,@function
@@ -4447,7 +4447,7 @@ __aesni_set_encrypt_key:
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size aesni_set_encrypt_key,.-aesni_set_encrypt_key
.size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key
.align 64
diff --git a/contrib/libs/openssl/asm/linux/crypto/bn/rsaz-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/bn/rsaz-x86_64.s
index d5025b23cd..f1037a427c 100644
--- a/contrib/libs/openssl/asm/linux/crypto/bn/rsaz-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/bn/rsaz-x86_64.s
@@ -29,7 +29,7 @@ rsaz_512_sqr:
subq $128+24,%rsp
.cfi_adjust_cfa_offset 128+24
.Lsqr_body:
-.byte 102,72,15,110,202
+.byte 102,72,15,110,202
movq (%rsi),%rdx
movq 8(%rsi),%rax
movq %rcx,128(%rsp)
@@ -44,7 +44,7 @@ rsaz_512_sqr:
movl %r8d,128+8(%rsp)
movq %rdx,%rbx
- movq %rax,%rbp
+ movq %rax,%rbp
mulq %rdx
movq %rax,%r8
movq 16(%rsi),%rax
@@ -83,29 +83,29 @@ rsaz_512_sqr:
mulq %rbx
addq %rax,%r14
movq %rbx,%rax
- adcq $0,%rdx
+ adcq $0,%rdx
- xorq %rcx,%rcx
+ xorq %rcx,%rcx
addq %r8,%r8
- movq %rdx,%r15
- adcq $0,%rcx
+ movq %rdx,%r15
+ adcq $0,%rcx
mulq %rax
- addq %r8,%rdx
- adcq $0,%rcx
-
+ addq %r8,%rdx
+ adcq $0,%rcx
+
movq %rax,(%rsp)
- movq %rdx,8(%rsp)
+ movq %rdx,8(%rsp)
movq 16(%rsi),%rax
- mulq %rbp
+ mulq %rbp
addq %rax,%r10
movq 24(%rsi),%rax
movq %rdx,%rbx
adcq $0,%rbx
- mulq %rbp
+ mulq %rbp
addq %rax,%r11
movq 32(%rsi),%rax
adcq $0,%rdx
@@ -113,7 +113,7 @@ rsaz_512_sqr:
movq %rdx,%rbx
adcq $0,%rbx
- mulq %rbp
+ mulq %rbp
addq %rax,%r12
movq 40(%rsi),%rax
adcq $0,%rdx
@@ -121,7 +121,7 @@ rsaz_512_sqr:
movq %rdx,%rbx
adcq $0,%rbx
- mulq %rbp
+ mulq %rbp
addq %rax,%r13
movq 48(%rsi),%rax
adcq $0,%rdx
@@ -129,7 +129,7 @@ rsaz_512_sqr:
movq %rdx,%rbx
adcq $0,%rbx
- mulq %rbp
+ mulq %rbp
addq %rax,%r14
movq 56(%rsi),%rax
adcq $0,%rdx
@@ -137,39 +137,39 @@ rsaz_512_sqr:
movq %rdx,%rbx
adcq $0,%rbx
- mulq %rbp
+ mulq %rbp
addq %rax,%r15
- movq %rbp,%rax
+ movq %rbp,%rax
adcq $0,%rdx
addq %rbx,%r15
- adcq $0,%rdx
-
- xorq %rbx,%rbx
- addq %r9,%r9
+ adcq $0,%rdx
+
+ xorq %rbx,%rbx
+ addq %r9,%r9
movq %rdx,%r8
- adcq %r10,%r10
- adcq $0,%rbx
+ adcq %r10,%r10
+ adcq $0,%rbx
- mulq %rax
+ mulq %rax
- addq %rcx,%rax
- movq 16(%rsi),%rbp
+ addq %rcx,%rax
+ movq 16(%rsi),%rbp
addq %rax,%r9
- movq 24(%rsi),%rax
+ movq 24(%rsi),%rax
adcq %rdx,%r10
- adcq $0,%rbx
+ adcq $0,%rbx
movq %r9,16(%rsp)
movq %r10,24(%rsp)
- mulq %rbp
+ mulq %rbp
addq %rax,%r12
movq 32(%rsi),%rax
movq %rdx,%rcx
adcq $0,%rcx
- mulq %rbp
+ mulq %rbp
addq %rax,%r13
movq 40(%rsi),%rax
adcq $0,%rdx
@@ -177,7 +177,7 @@ rsaz_512_sqr:
movq %rdx,%rcx
adcq $0,%rcx
- mulq %rbp
+ mulq %rbp
addq %rax,%r14
movq 48(%rsi),%rax
adcq $0,%rdx
@@ -185,7 +185,7 @@ rsaz_512_sqr:
movq %rdx,%rcx
adcq $0,%rcx
- mulq %rbp
+ mulq %rbp
addq %rax,%r15
movq 56(%rsi),%rax
adcq $0,%rdx
@@ -193,40 +193,40 @@ rsaz_512_sqr:
movq %rdx,%rcx
adcq $0,%rcx
- mulq %rbp
+ mulq %rbp
addq %rax,%r8
- movq %rbp,%rax
+ movq %rbp,%rax
adcq $0,%rdx
addq %rcx,%r8
- adcq $0,%rdx
-
- xorq %rcx,%rcx
- addq %r11,%r11
+ adcq $0,%rdx
+
+ xorq %rcx,%rcx
+ addq %r11,%r11
movq %rdx,%r9
- adcq %r12,%r12
- adcq $0,%rcx
+ adcq %r12,%r12
+ adcq $0,%rcx
- mulq %rax
+ mulq %rax
- addq %rbx,%rax
- movq 24(%rsi),%r10
+ addq %rbx,%rax
+ movq 24(%rsi),%r10
addq %rax,%r11
- movq 32(%rsi),%rax
+ movq 32(%rsi),%rax
adcq %rdx,%r12
- adcq $0,%rcx
+ adcq $0,%rcx
movq %r11,32(%rsp)
movq %r12,40(%rsp)
- movq %rax,%r11
+ movq %rax,%r11
mulq %r10
addq %rax,%r14
movq 40(%rsi),%rax
movq %rdx,%rbx
adcq $0,%rbx
- movq %rax,%r12
+ movq %rax,%r12
mulq %r10
addq %rax,%r15
movq 48(%rsi),%rax
@@ -235,7 +235,7 @@ rsaz_512_sqr:
movq %rdx,%rbx
adcq $0,%rbx
- movq %rax,%rbp
+ movq %rax,%rbp
mulq %r10
addq %rax,%r8
movq 56(%rsi),%rax
@@ -249,21 +249,21 @@ rsaz_512_sqr:
movq %r10,%rax
adcq $0,%rdx
addq %rbx,%r9
- adcq $0,%rdx
-
- xorq %rbx,%rbx
- addq %r13,%r13
+ adcq $0,%rdx
+
+ xorq %rbx,%rbx
+ addq %r13,%r13
movq %rdx,%r10
- adcq %r14,%r14
- adcq $0,%rbx
+ adcq %r14,%r14
+ adcq $0,%rbx
- mulq %rax
+ mulq %rax
- addq %rcx,%rax
+ addq %rcx,%rax
addq %rax,%r13
- movq %r12,%rax
+ movq %r12,%rax
adcq %rdx,%r14
- adcq $0,%rbx
+ adcq $0,%rbx
movq %r13,48(%rsp)
movq %r14,56(%rsp)
@@ -271,7 +271,7 @@ rsaz_512_sqr:
mulq %r11
addq %rax,%r8
- movq %rbp,%rax
+ movq %rbp,%rax
movq %rdx,%rcx
adcq $0,%rcx
@@ -283,27 +283,27 @@ rsaz_512_sqr:
movq %rdx,%rcx
adcq $0,%rcx
- movq %rax,%r14
+ movq %rax,%r14
mulq %r11
addq %rax,%r10
movq %r11,%rax
adcq $0,%rdx
addq %rcx,%r10
- adcq $0,%rdx
-
- xorq %rcx,%rcx
- addq %r15,%r15
+ adcq $0,%rdx
+
+ xorq %rcx,%rcx
+ addq %r15,%r15
movq %rdx,%r11
- adcq %r8,%r8
- adcq $0,%rcx
+ adcq %r8,%r8
+ adcq $0,%rcx
- mulq %rax
+ mulq %rax
- addq %rbx,%rax
+ addq %rbx,%rax
addq %rax,%r15
- movq %rbp,%rax
+ movq %rbp,%rax
adcq %rdx,%r8
- adcq $0,%rcx
+ adcq $0,%rcx
movq %r15,64(%rsp)
movq %r8,72(%rsp)
@@ -311,7 +311,7 @@ rsaz_512_sqr:
mulq %r12
addq %rax,%r10
- movq %r14,%rax
+ movq %r14,%rax
movq %rdx,%rbx
adcq $0,%rbx
@@ -320,58 +320,58 @@ rsaz_512_sqr:
movq %r12,%rax
adcq $0,%rdx
addq %rbx,%r11
- adcq $0,%rdx
-
- xorq %rbx,%rbx
- addq %r9,%r9
+ adcq $0,%rdx
+
+ xorq %rbx,%rbx
+ addq %r9,%r9
movq %rdx,%r12
- adcq %r10,%r10
- adcq $0,%rbx
+ adcq %r10,%r10
+ adcq $0,%rbx
- mulq %rax
+ mulq %rax
- addq %rcx,%rax
+ addq %rcx,%rax
addq %rax,%r9
- movq %r14,%rax
+ movq %r14,%rax
adcq %rdx,%r10
- adcq $0,%rbx
+ adcq $0,%rbx
movq %r9,80(%rsp)
movq %r10,88(%rsp)
- mulq %rbp
+ mulq %rbp
addq %rax,%r12
- movq %rbp,%rax
- adcq $0,%rdx
-
- xorq %rcx,%rcx
- addq %r11,%r11
+ movq %rbp,%rax
+ adcq $0,%rdx
+
+ xorq %rcx,%rcx
+ addq %r11,%r11
movq %rdx,%r13
adcq %r12,%r12
- adcq $0,%rcx
+ adcq $0,%rcx
mulq %rax
-
- addq %rbx,%rax
+
+ addq %rbx,%rax
addq %rax,%r11
- movq %r14,%rax
+ movq %r14,%rax
adcq %rdx,%r12
- adcq $0,%rcx
+ adcq $0,%rcx
movq %r11,96(%rsp)
movq %r12,104(%rsp)
- xorq %rbx,%rbx
- addq %r13,%r13
- adcq $0,%rbx
-
+ xorq %rbx,%rbx
+ addq %r13,%r13
+ adcq $0,%rbx
+
mulq %rax
- addq %rcx,%rax
- addq %r13,%rax
- adcq %rbx,%rdx
+ addq %rcx,%rax
+ addq %r13,%rax
+ adcq %rbx,%rdx
movq (%rsp),%r8
movq 8(%rsp),%r9
@@ -381,11 +381,11 @@ rsaz_512_sqr:
movq 40(%rsp),%r13
movq 48(%rsp),%r14
movq 56(%rsp),%r15
-.byte 102,72,15,126,205
-
- movq %rax,112(%rsp)
- movq %rdx,120(%rsp)
+.byte 102,72,15,126,205
+ movq %rax,112(%rsp)
+ movq %rdx,120(%rsp)
+
call __rsaz_512_reduce
addq 64(%rsp),%r8
@@ -415,7 +415,7 @@ rsaz_512_sqr:
.byte 102,72,15,110,199
mulxq %rax,%r8,%r9
- movq %rax,%rbx
+ movq %rax,%rbx
mulxq 16(%rsi),%rcx,%r10
xorq %rbp,%rbp
@@ -423,39 +423,39 @@ rsaz_512_sqr:
mulxq 24(%rsi),%rax,%r11
adcxq %rcx,%r9
-.byte 0xc4,0x62,0xf3,0xf6,0xa6,0x20,0x00,0x00,0x00
+.byte 0xc4,0x62,0xf3,0xf6,0xa6,0x20,0x00,0x00,0x00
adcxq %rax,%r10
-.byte 0xc4,0x62,0xfb,0xf6,0xae,0x28,0x00,0x00,0x00
+.byte 0xc4,0x62,0xfb,0xf6,0xae,0x28,0x00,0x00,0x00
adcxq %rcx,%r11
- mulxq 48(%rsi),%rcx,%r14
+ mulxq 48(%rsi),%rcx,%r14
adcxq %rax,%r12
adcxq %rcx,%r13
- mulxq 56(%rsi),%rax,%r15
+ mulxq 56(%rsi),%rax,%r15
adcxq %rax,%r14
adcxq %rbp,%r15
- mulxq %rdx,%rax,%rdi
- movq %rbx,%rdx
- xorq %rcx,%rcx
- adoxq %r8,%r8
- adcxq %rdi,%r8
- adoxq %rbp,%rcx
- adcxq %rbp,%rcx
+ mulxq %rdx,%rax,%rdi
+ movq %rbx,%rdx
+ xorq %rcx,%rcx
+ adoxq %r8,%r8
+ adcxq %rdi,%r8
+ adoxq %rbp,%rcx
+ adcxq %rbp,%rcx
movq %rax,(%rsp)
movq %r8,8(%rsp)
-.byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x10,0x00,0x00,0x00
+.byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x10,0x00,0x00,0x00
adoxq %rax,%r10
adcxq %rbx,%r11
- mulxq 24(%rsi),%rdi,%r8
+ mulxq 24(%rsi),%rdi,%r8
adoxq %rdi,%r11
-.byte 0x66
+.byte 0x66
adcxq %r8,%r12
mulxq 32(%rsi),%rax,%rbx
@@ -473,25 +473,25 @@ rsaz_512_sqr:
.byte 0xc4,0x62,0xc3,0xf6,0x86,0x38,0x00,0x00,0x00
adoxq %rdi,%r15
adcxq %rbp,%r8
- mulxq %rdx,%rax,%rdi
+ mulxq %rdx,%rax,%rdi
adoxq %rbp,%r8
-.byte 0x48,0x8b,0x96,0x10,0x00,0x00,0x00
+.byte 0x48,0x8b,0x96,0x10,0x00,0x00,0x00
- xorq %rbx,%rbx
- adoxq %r9,%r9
+ xorq %rbx,%rbx
+ adoxq %r9,%r9
- adcxq %rcx,%rax
- adoxq %r10,%r10
+ adcxq %rcx,%rax
+ adoxq %r10,%r10
adcxq %rax,%r9
- adoxq %rbp,%rbx
- adcxq %rdi,%r10
- adcxq %rbp,%rbx
+ adoxq %rbp,%rbx
+ adcxq %rdi,%r10
+ adcxq %rbp,%rbx
movq %r9,16(%rsp)
.byte 0x4c,0x89,0x94,0x24,0x18,0x00,0x00,0x00
- mulxq 24(%rsi),%rdi,%r9
+ mulxq 24(%rsi),%rdi,%r9
adoxq %rdi,%r12
adcxq %r9,%r13
@@ -499,7 +499,7 @@ rsaz_512_sqr:
adoxq %rax,%r13
adcxq %rcx,%r14
-.byte 0xc4,0x62,0xc3,0xf6,0x8e,0x28,0x00,0x00,0x00
+.byte 0xc4,0x62,0xc3,0xf6,0x8e,0x28,0x00,0x00,0x00
adoxq %rdi,%r14
adcxq %r9,%r15
@@ -507,28 +507,28 @@ rsaz_512_sqr:
adoxq %rax,%r15
adcxq %rcx,%r8
- mulxq 56(%rsi),%rdi,%r9
+ mulxq 56(%rsi),%rdi,%r9
adoxq %rdi,%r8
adcxq %rbp,%r9
- mulxq %rdx,%rax,%rdi
+ mulxq %rdx,%rax,%rdi
adoxq %rbp,%r9
- movq 24(%rsi),%rdx
+ movq 24(%rsi),%rdx
- xorq %rcx,%rcx
- adoxq %r11,%r11
+ xorq %rcx,%rcx
+ adoxq %r11,%r11
- adcxq %rbx,%rax
- adoxq %r12,%r12
+ adcxq %rbx,%rax
+ adoxq %r12,%r12
adcxq %rax,%r11
- adoxq %rbp,%rcx
- adcxq %rdi,%r12
- adcxq %rbp,%rcx
+ adoxq %rbp,%rcx
+ adcxq %rdi,%r12
+ adcxq %rbp,%rcx
movq %r11,32(%rsp)
- movq %r12,40(%rsp)
+ movq %r12,40(%rsp)
- mulxq 32(%rsi),%rax,%rbx
+ mulxq 32(%rsi),%rax,%rbx
adoxq %rax,%r14
adcxq %rbx,%r15
@@ -543,25 +543,25 @@ rsaz_512_sqr:
mulxq 56(%rsi),%rdi,%r10
adoxq %rdi,%r9
adcxq %rbp,%r10
- mulxq %rdx,%rax,%rdi
+ mulxq %rdx,%rax,%rdi
adoxq %rbp,%r10
- movq 32(%rsi),%rdx
+ movq 32(%rsi),%rdx
- xorq %rbx,%rbx
- adoxq %r13,%r13
+ xorq %rbx,%rbx
+ adoxq %r13,%r13
- adcxq %rcx,%rax
- adoxq %r14,%r14
+ adcxq %rcx,%rax
+ adoxq %r14,%r14
adcxq %rax,%r13
- adoxq %rbp,%rbx
- adcxq %rdi,%r14
- adcxq %rbp,%rbx
+ adoxq %rbp,%rbx
+ adcxq %rdi,%r14
+ adcxq %rbp,%rbx
movq %r13,48(%rsp)
movq %r14,56(%rsp)
- mulxq 40(%rsi),%rdi,%r11
+ mulxq 40(%rsi),%rdi,%r11
adoxq %rdi,%r8
adcxq %r11,%r9
@@ -572,19 +572,19 @@ rsaz_512_sqr:
mulxq 56(%rsi),%rdi,%r11
adoxq %rdi,%r10
adcxq %rbp,%r11
- mulxq %rdx,%rax,%rdi
- movq 40(%rsi),%rdx
+ mulxq %rdx,%rax,%rdi
+ movq 40(%rsi),%rdx
adoxq %rbp,%r11
- xorq %rcx,%rcx
- adoxq %r15,%r15
+ xorq %rcx,%rcx
+ adoxq %r15,%r15
- adcxq %rbx,%rax
- adoxq %r8,%r8
+ adcxq %rbx,%rax
+ adoxq %r8,%r8
adcxq %rax,%r15
- adoxq %rbp,%rcx
- adcxq %rdi,%r8
- adcxq %rbp,%rcx
+ adoxq %rbp,%rcx
+ adcxq %rdi,%r8
+ adcxq %rbp,%rcx
movq %r15,64(%rsp)
movq %r8,72(%rsp)
@@ -597,19 +597,19 @@ rsaz_512_sqr:
.byte 0xc4,0x62,0xc3,0xf6,0xa6,0x38,0x00,0x00,0x00
adoxq %rdi,%r11
adcxq %rbp,%r12
- mulxq %rdx,%rax,%rdi
+ mulxq %rdx,%rax,%rdi
adoxq %rbp,%r12
- movq 48(%rsi),%rdx
+ movq 48(%rsi),%rdx
- xorq %rbx,%rbx
- adoxq %r9,%r9
+ xorq %rbx,%rbx
+ adoxq %r9,%r9
- adcxq %rcx,%rax
- adoxq %r10,%r10
+ adcxq %rcx,%rax
+ adoxq %r10,%r10
adcxq %rax,%r9
- adcxq %rdi,%r10
- adoxq %rbp,%rbx
- adcxq %rbp,%rbx
+ adcxq %rdi,%r10
+ adoxq %rbp,%rbx
+ adcxq %rbp,%rbx
movq %r9,80(%rsp)
movq %r10,88(%rsp)
@@ -619,30 +619,30 @@ rsaz_512_sqr:
adoxq %rax,%r12
adoxq %rbp,%r13
- mulxq %rdx,%rax,%rdi
- xorq %rcx,%rcx
- movq 56(%rsi),%rdx
- adoxq %r11,%r11
+ mulxq %rdx,%rax,%rdi
+ xorq %rcx,%rcx
+ movq 56(%rsi),%rdx
+ adoxq %r11,%r11
- adcxq %rbx,%rax
- adoxq %r12,%r12
+ adcxq %rbx,%rax
+ adoxq %r12,%r12
adcxq %rax,%r11
- adoxq %rbp,%rcx
- adcxq %rdi,%r12
- adcxq %rbp,%rcx
+ adoxq %rbp,%rcx
+ adcxq %rdi,%r12
+ adcxq %rbp,%rcx
.byte 0x4c,0x89,0x9c,0x24,0x60,0x00,0x00,0x00
.byte 0x4c,0x89,0xa4,0x24,0x68,0x00,0x00,0x00
mulxq %rdx,%rax,%rdx
- xorq %rbx,%rbx
- adoxq %r13,%r13
+ xorq %rbx,%rbx
+ adoxq %r13,%r13
- adcxq %rcx,%rax
- adoxq %rbp,%rbx
- adcxq %r13,%rax
- adcxq %rdx,%rbx
+ adcxq %rcx,%rax
+ adoxq %rbp,%rbx
+ adcxq %r13,%rax
+ adcxq %rdx,%rbx
.byte 102,72,15,126,199
.byte 102,72,15,126,205
@@ -657,9 +657,9 @@ rsaz_512_sqr:
movq 48(%rsp),%r14
movq 56(%rsp),%r15
- movq %rax,112(%rsp)
- movq %rbx,120(%rsp)
-
+ movq %rax,112(%rsp)
+ movq %rbx,120(%rsp)
+
call __rsaz_512_reducex
addq 64(%rsp),%r8
@@ -1453,7 +1453,7 @@ rsaz_512_mul_by_one:
.type __rsaz_512_reduce,@function
.align 32
__rsaz_512_reduce:
-.cfi_startproc
+.cfi_startproc
movq %r8,%rbx
imulq 128+8(%rsp),%rbx
movq 0(%rbp),%rax
@@ -1533,12 +1533,12 @@ __rsaz_512_reduce:
jne .Lreduction_loop
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __rsaz_512_reduce,.-__rsaz_512_reduce
.type __rsaz_512_reducex,@function
.align 32
__rsaz_512_reducex:
-.cfi_startproc
+.cfi_startproc
imulq %r8,%rdx
xorq %rsi,%rsi
@@ -1591,12 +1591,12 @@ __rsaz_512_reducex:
jne .Lreduction_loopx
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __rsaz_512_reducex,.-__rsaz_512_reducex
.type __rsaz_512_subtract,@function
.align 32
__rsaz_512_subtract:
-.cfi_startproc
+.cfi_startproc
movq %r8,(%rdi)
movq %r9,8(%rdi)
movq %r10,16(%rdi)
@@ -1650,12 +1650,12 @@ __rsaz_512_subtract:
movq %r15,56(%rdi)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __rsaz_512_subtract,.-__rsaz_512_subtract
.type __rsaz_512_mul,@function
.align 32
__rsaz_512_mul:
-.cfi_startproc
+.cfi_startproc
leaq 8(%rsp),%rdi
movq (%rsi),%rax
@@ -1794,12 +1794,12 @@ __rsaz_512_mul:
movq %r15,56(%rdi)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __rsaz_512_mul,.-__rsaz_512_mul
.type __rsaz_512_mulx,@function
.align 32
__rsaz_512_mulx:
-.cfi_startproc
+.cfi_startproc
mulxq (%rsi),%rbx,%r8
movq $-6,%rcx
@@ -1916,13 +1916,13 @@ __rsaz_512_mulx:
movq %r15,8+64+56(%rsp)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __rsaz_512_mulx,.-__rsaz_512_mulx
.globl rsaz_512_scatter4
.type rsaz_512_scatter4,@function
.align 16
rsaz_512_scatter4:
-.cfi_startproc
+.cfi_startproc
leaq (%rdi,%rdx,8),%rdi
movl $8,%r9d
jmp .Loop_scatter
@@ -1935,14 +1935,14 @@ rsaz_512_scatter4:
decl %r9d
jnz .Loop_scatter
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size rsaz_512_scatter4,.-rsaz_512_scatter4
.globl rsaz_512_gather4
.type rsaz_512_gather4,@function
.align 16
rsaz_512_gather4:
-.cfi_startproc
+.cfi_startproc
movd %edx,%xmm8
movdqa .Linc+16(%rip),%xmm1
movdqa .Linc(%rip),%xmm0
@@ -2006,7 +2006,7 @@ rsaz_512_gather4:
jnz .Loop_gather
.byte 0xf3,0xc3
.LSEH_end_rsaz_512_gather4:
-.cfi_endproc
+.cfi_endproc
.size rsaz_512_gather4,.-rsaz_512_gather4
.align 64
diff --git a/contrib/libs/openssl/asm/linux/crypto/bn/x86_64-mont5.s b/contrib/libs/openssl/asm/linux/crypto/bn/x86_64-mont5.s
index ab93b02d8c..7e3471358b 100644
--- a/contrib/libs/openssl/asm/linux/crypto/bn/x86_64-mont5.s
+++ b/contrib/libs/openssl/asm/linux/crypto/bn/x86_64-mont5.s
@@ -550,7 +550,7 @@ bn_mul4x_mont_gather5:
.type mul4x_internal,@function
.align 32
mul4x_internal:
-.cfi_startproc
+.cfi_startproc
shlq $5,%r9
movd 8(%rax),%xmm5
leaq .Linc(%rip),%rax
@@ -1072,7 +1072,7 @@ mul4x_internal:
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp .Lsqr4x_sub_entry
-.cfi_endproc
+.cfi_endproc
.size mul4x_internal,.-mul4x_internal
.globl bn_power5
.type bn_power5,@function
@@ -1215,7 +1215,7 @@ bn_power5:
.align 32
bn_sqr8x_internal:
__bn_sqr8x_internal:
-.cfi_startproc
+.cfi_startproc
@@ -1990,12 +1990,12 @@ __bn_sqr8x_reduction:
cmpq %rdx,%rdi
jb .L8x_reduction_loop
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size bn_sqr8x_internal,.-bn_sqr8x_internal
.type __bn_post4x_internal,@function
.align 32
__bn_post4x_internal:
-.cfi_startproc
+.cfi_startproc
movq 0(%rbp),%r12
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
@@ -2046,18 +2046,18 @@ __bn_post4x_internal:
movq %r9,%r10
negq %r9
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __bn_post4x_internal,.-__bn_post4x_internal
.globl bn_from_montgomery
.type bn_from_montgomery,@function
.align 32
bn_from_montgomery:
-.cfi_startproc
+.cfi_startproc
testl $7,%r9d
jz bn_from_mont8x
xorl %eax,%eax
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size bn_from_montgomery,.-bn_from_montgomery
.type bn_from_mont8x,@function
@@ -2341,7 +2341,7 @@ bn_mulx4x_mont_gather5:
.type mulx4x_internal,@function
.align 32
mulx4x_internal:
-.cfi_startproc
+.cfi_startproc
movq %r9,8(%rsp)
movq %r9,%r10
negq %r9
@@ -2760,7 +2760,7 @@ mulx4x_internal:
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp .Lsqrx4x_sub_entry
-.cfi_endproc
+.cfi_endproc
.size mulx4x_internal,.-mulx4x_internal
.type bn_powerx5,@function
.align 32
@@ -3519,7 +3519,7 @@ __bn_sqrx8x_reduction:
.size bn_sqrx8x_internal,.-bn_sqrx8x_internal
.align 32
__bn_postx4x_internal:
-.cfi_startproc
+.cfi_startproc
movq 0(%rbp),%r12
movq %rcx,%r10
movq %rcx,%r9
@@ -3567,13 +3567,13 @@ __bn_postx4x_internal:
negq %r9
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __bn_postx4x_internal,.-__bn_postx4x_internal
.globl bn_get_bits5
.type bn_get_bits5,@function
.align 16
bn_get_bits5:
-.cfi_startproc
+.cfi_startproc
leaq 0(%rdi),%r10
leaq 1(%rdi),%r11
movl %esi,%ecx
@@ -3587,14 +3587,14 @@ bn_get_bits5:
shrl %cl,%eax
andl $31,%eax
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size bn_get_bits5,.-bn_get_bits5
.globl bn_scatter5
.type bn_scatter5,@function
.align 16
bn_scatter5:
-.cfi_startproc
+.cfi_startproc
cmpl $0,%esi
jz .Lscatter_epilogue
leaq (%rdx,%rcx,8),%rdx
@@ -3607,7 +3607,7 @@ bn_scatter5:
jnz .Lscatter
.Lscatter_epilogue:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size bn_scatter5,.-bn_scatter5
.globl bn_gather5
@@ -3615,7 +3615,7 @@ bn_scatter5:
.align 32
bn_gather5:
.LSEH_begin_bn_gather5:
-.cfi_startproc
+.cfi_startproc
.byte 0x4c,0x8d,0x14,0x24
.byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00
@@ -3773,7 +3773,7 @@ bn_gather5:
leaq (%r10),%rsp
.byte 0xf3,0xc3
.LSEH_end_bn_gather5:
-.cfi_endproc
+.cfi_endproc
.size bn_gather5,.-bn_gather5
.align 64
.Linc:
diff --git a/contrib/libs/openssl/asm/linux/crypto/camellia/cmll-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/camellia/cmll-x86_64.s
index 92056f8b1e..14c3ed985f 100644
--- a/contrib/libs/openssl/asm/linux/crypto/camellia/cmll-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/camellia/cmll-x86_64.s
@@ -5,13 +5,13 @@
.type Camellia_EncryptBlock,@function
.align 16
Camellia_EncryptBlock:
-.cfi_startproc
+.cfi_startproc
movl $128,%eax
subl %edi,%eax
movl $3,%edi
adcl $0,%edi
jmp .Lenc_rounds
-.cfi_endproc
+.cfi_endproc
.size Camellia_EncryptBlock,.-Camellia_EncryptBlock
.globl Camellia_EncryptBlock_Rounds
@@ -85,7 +85,7 @@ Camellia_EncryptBlock_Rounds:
.type _x86_64_Camellia_encrypt,@function
.align 16
_x86_64_Camellia_encrypt:
-.cfi_startproc
+.cfi_startproc
xorl 0(%r14),%r9d
xorl 4(%r14),%r8d
xorl 8(%r14),%r11d
@@ -288,7 +288,7 @@ _x86_64_Camellia_encrypt:
movl %edx,%r11d
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt
@@ -296,13 +296,13 @@ _x86_64_Camellia_encrypt:
.type Camellia_DecryptBlock,@function
.align 16
Camellia_DecryptBlock:
-.cfi_startproc
+.cfi_startproc
movl $128,%eax
subl %edi,%eax
movl $3,%edi
adcl $0,%edi
jmp .Ldec_rounds
-.cfi_endproc
+.cfi_endproc
.size Camellia_DecryptBlock,.-Camellia_DecryptBlock
.globl Camellia_DecryptBlock_Rounds
@@ -376,7 +376,7 @@ Camellia_DecryptBlock_Rounds:
.type _x86_64_Camellia_decrypt,@function
.align 16
_x86_64_Camellia_decrypt:
-.cfi_startproc
+.cfi_startproc
xorl 0(%r14),%r9d
xorl 4(%r14),%r8d
xorl 8(%r14),%r11d
@@ -580,7 +580,7 @@ _x86_64_Camellia_decrypt:
movl %ebx,%r11d
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt
.globl Camellia_Ekeygen
.type Camellia_Ekeygen,@function
diff --git a/contrib/libs/openssl/asm/linux/crypto/ec/ecp_nistz256-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/ec/ecp_nistz256-x86_64.s
index 80569cae04..3729201221 100644
--- a/contrib/libs/openssl/asm/linux/crypto/ec/ecp_nistz256-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/ec/ecp_nistz256-x86_64.s
@@ -3874,12 +3874,12 @@ ecp_nistz256_ord_sqr_montx:
.type ecp_nistz256_to_mont,@function
.align 32
ecp_nistz256_to_mont:
-.cfi_startproc
+.cfi_startproc
movl $0x80100,%ecx
andl OPENSSL_ia32cap_P+8(%rip),%ecx
leaq .LRR(%rip),%rdx
jmp .Lmul_mont
-.cfi_endproc
+.cfi_endproc
.size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
@@ -4823,7 +4823,7 @@ ecp_nistz256_from_mont:
.type ecp_nistz256_scatter_w5,@function
.align 32
ecp_nistz256_scatter_w5:
-.cfi_startproc
+.cfi_startproc
leal -3(%rdx,%rdx,2),%edx
movdqa 0(%rsi),%xmm0
shll $5,%edx
@@ -4840,7 +4840,7 @@ ecp_nistz256_scatter_w5:
movdqa %xmm5,80(%rdi,%rdx,1)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
@@ -4914,7 +4914,7 @@ ecp_nistz256_gather_w5:
.type ecp_nistz256_scatter_w7,@function
.align 32
ecp_nistz256_scatter_w7:
-.cfi_startproc
+.cfi_startproc
movdqu 0(%rsi),%xmm0
shll $6,%edx
movdqu 16(%rsi),%xmm1
@@ -4926,7 +4926,7 @@ ecp_nistz256_scatter_w7:
movdqa %xmm3,48(%rdi,%rdx,1)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
@@ -5663,12 +5663,12 @@ ecp_nistz256_point_add:
.byte 102,73,15,126,208
.byte 102,73,15,126,217
-
- orq %r8,%r12
- orq %r9,%r12
-
-
-.byte 0x3e
+
+ orq %r8,%r12
+ orq %r9,%r12
+
+
+.byte 0x3e
jnz .Ladd_proceedq
.Ladd_doubleq:
@@ -6772,12 +6772,12 @@ ecp_nistz256_point_addx:
.byte 102,73,15,126,208
.byte 102,73,15,126,217
-
- orq %r8,%r12
- orq %r9,%r12
-
-
-.byte 0x3e
+
+ orq %r8,%r12
+ orq %r9,%r12
+
+
+.byte 0x3e
jnz .Ladd_proceedx
.Ladd_doublex:
diff --git a/contrib/libs/openssl/asm/linux/crypto/ec/x25519-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/ec/x25519-x86_64.s
index 8fd319c83c..6247dc89d0 100644
--- a/contrib/libs/openssl/asm/linux/crypto/ec/x25519-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/ec/x25519-x86_64.s
@@ -400,14 +400,14 @@ x25519_fe51_mul121666:
.type x25519_fe64_eligible,@function
.align 32
x25519_fe64_eligible:
-.cfi_startproc
+.cfi_startproc
movl OPENSSL_ia32cap_P+8(%rip),%ecx
xorl %eax,%eax
andl $0x80100,%ecx
cmpl $0x80100,%ecx
cmovel %ecx,%eax
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size x25519_fe64_eligible,.-x25519_fe64_eligible
.globl x25519_fe64_mul
@@ -650,7 +650,7 @@ x25519_fe64_sqr:
.align 32
x25519_fe64_mul121666:
.Lfe64_mul121666_body:
-.cfi_startproc
+.cfi_startproc
movl $121666,%edx
mulxq 0(%rsi),%r8,%rcx
mulxq 8(%rsi),%r9,%rax
@@ -679,7 +679,7 @@ x25519_fe64_mul121666:
.Lfe64_mul121666_epilogue:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size x25519_fe64_mul121666,.-x25519_fe64_mul121666
.globl x25519_fe64_add
@@ -687,7 +687,7 @@ x25519_fe64_mul121666:
.align 32
x25519_fe64_add:
.Lfe64_add_body:
-.cfi_startproc
+.cfi_startproc
movq 0(%rsi),%r8
movq 8(%rsi),%r9
movq 16(%rsi),%r10
@@ -716,7 +716,7 @@ x25519_fe64_add:
.Lfe64_add_epilogue:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size x25519_fe64_add,.-x25519_fe64_add
.globl x25519_fe64_sub
@@ -724,7 +724,7 @@ x25519_fe64_add:
.align 32
x25519_fe64_sub:
.Lfe64_sub_body:
-.cfi_startproc
+.cfi_startproc
movq 0(%rsi),%r8
movq 8(%rsi),%r9
movq 16(%rsi),%r10
@@ -753,7 +753,7 @@ x25519_fe64_sub:
.Lfe64_sub_epilogue:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size x25519_fe64_sub,.-x25519_fe64_sub
.globl x25519_fe64_tobytes
@@ -761,7 +761,7 @@ x25519_fe64_sub:
.align 32
x25519_fe64_tobytes:
.Lfe64_to_body:
-.cfi_startproc
+.cfi_startproc
movq 0(%rsi),%r8
movq 8(%rsi),%r9
movq 16(%rsi),%r10
@@ -797,6 +797,6 @@ x25519_fe64_tobytes:
.Lfe64_to_epilogue:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size x25519_fe64_tobytes,.-x25519_fe64_tobytes
.byte 88,50,53,53,49,57,32,112,114,105,109,105,116,105,118,101,115,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
diff --git a/contrib/libs/openssl/asm/linux/crypto/modes/aesni-gcm-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/modes/aesni-gcm-x86_64.s
index bf508aff6f..3a3b864f84 100644
--- a/contrib/libs/openssl/asm/linux/crypto/modes/aesni-gcm-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/modes/aesni-gcm-x86_64.s
@@ -3,7 +3,7 @@
.type _aesni_ctr32_ghash_6x,@function
.align 32
_aesni_ctr32_ghash_6x:
-.cfi_startproc
+.cfi_startproc
vmovdqu 32(%r11),%xmm2
subq $6,%rdx
vpxor %xmm4,%xmm4,%xmm4
@@ -311,7 +311,7 @@ _aesni_ctr32_ghash_6x:
vpxor %xmm4,%xmm8,%xmm8
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
.globl aesni_gcm_decrypt
.type aesni_gcm_decrypt,@function
@@ -418,7 +418,7 @@ aesni_gcm_decrypt:
.type _aesni_ctr32_6x,@function
.align 32
_aesni_ctr32_6x:
-.cfi_startproc
+.cfi_startproc
vmovdqu 0-128(%rcx),%xmm4
vmovdqu 32(%r11),%xmm2
leaq -1(%rbp),%r13
@@ -505,7 +505,7 @@ _aesni_ctr32_6x:
vpshufb %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm14,%xmm14
jmp .Loop_ctr32
-.cfi_endproc
+.cfi_endproc
.size _aesni_ctr32_6x,.-_aesni_ctr32_6x
.globl aesni_gcm_encrypt
diff --git a/contrib/libs/openssl/asm/linux/crypto/poly1305/poly1305-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/poly1305/poly1305-x86_64.s
index 9bb9be4632..2b127b33cb 100644
--- a/contrib/libs/openssl/asm/linux/crypto/poly1305/poly1305-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/poly1305/poly1305-x86_64.s
@@ -12,7 +12,7 @@
.type poly1305_init,@function
.align 32
poly1305_init:
-.cfi_startproc
+.cfi_startproc
xorq %rax,%rax
movq %rax,0(%rdi)
movq %rax,8(%rdi)
@@ -48,7 +48,7 @@ poly1305_init:
movl $1,%eax
.Lno_key:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size poly1305_init,.-poly1305_init
.type poly1305_blocks,@function
@@ -169,7 +169,7 @@ poly1305_blocks:
.type poly1305_emit,@function
.align 32
poly1305_emit:
-.cfi_startproc
+.cfi_startproc
.Lemit:
movq 0(%rdi),%r8
movq 8(%rdi),%r9
@@ -190,12 +190,12 @@ poly1305_emit:
movq %rcx,8(%rsi)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size poly1305_emit,.-poly1305_emit
.type __poly1305_block,@function
.align 32
__poly1305_block:
-.cfi_startproc
+.cfi_startproc
mulq %r14
movq %rax,%r9
movq %r11,%rax
@@ -235,13 +235,13 @@ __poly1305_block:
adcq $0,%rbx
adcq $0,%rbp
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __poly1305_block,.-__poly1305_block
.type __poly1305_init_avx,@function
.align 32
__poly1305_init_avx:
-.cfi_startproc
+.cfi_startproc
movq %r11,%r14
movq %r12,%rbx
xorq %rbp,%rbp
@@ -399,7 +399,7 @@ __poly1305_init_avx:
leaq -48-64(%rdi),%rdi
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __poly1305_init_avx,.-__poly1305_init_avx
.type poly1305_blocks_avx,@function
@@ -1240,7 +1240,7 @@ poly1305_blocks_avx:
.type poly1305_emit_avx,@function
.align 32
poly1305_emit_avx:
-.cfi_startproc
+.cfi_startproc
cmpl $0,20(%rdi)
je .Lemit
@@ -1291,7 +1291,7 @@ poly1305_emit_avx:
movq %rcx,8(%rsi)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size poly1305_emit_avx,.-poly1305_emit_avx
.type poly1305_blocks_avx2,@function
.align 32
@@ -2488,7 +2488,7 @@ poly1305_blocks_avx512:
.type poly1305_init_base2_44,@function
.align 32
poly1305_init_base2_44:
-.cfi_startproc
+.cfi_startproc
xorq %rax,%rax
movq %rax,0(%rdi)
movq %rax,8(%rdi)
@@ -2522,12 +2522,12 @@ poly1305_init_base2_44:
movq %r11,8(%rdx)
movl $1,%eax
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size poly1305_init_base2_44,.-poly1305_init_base2_44
.type poly1305_blocks_vpmadd52,@function
.align 32
poly1305_blocks_vpmadd52:
-.cfi_startproc
+.cfi_startproc
shrq $4,%rdx
jz .Lno_data_vpmadd52
@@ -2634,12 +2634,12 @@ poly1305_blocks_vpmadd52:
.Lno_data_vpmadd52:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
.type poly1305_blocks_vpmadd52_4x,@function
.align 32
poly1305_blocks_vpmadd52_4x:
-.cfi_startproc
+.cfi_startproc
shrq $4,%rdx
jz .Lno_data_vpmadd52_4x
@@ -3064,12 +3064,12 @@ poly1305_blocks_vpmadd52_4x:
.Lno_data_vpmadd52_4x:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
.type poly1305_blocks_vpmadd52_8x,@function
.align 32
poly1305_blocks_vpmadd52_8x:
-.cfi_startproc
+.cfi_startproc
shrq $4,%rdx
jz .Lno_data_vpmadd52_8x
@@ -3410,12 +3410,12 @@ poly1305_blocks_vpmadd52_8x:
.Lno_data_vpmadd52_8x:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x
.type poly1305_emit_base2_44,@function
.align 32
poly1305_emit_base2_44:
-.cfi_startproc
+.cfi_startproc
movq 0(%rdi),%r8
movq 8(%rdi),%r9
movq 16(%rdi),%r10
@@ -3446,7 +3446,7 @@ poly1305_emit_base2_44:
movq %rcx,8(%rsi)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size poly1305_emit_base2_44,.-poly1305_emit_base2_44
.align 64
.Lconst:
@@ -3485,7 +3485,7 @@ poly1305_emit_base2_44:
.type xor128_encrypt_n_pad,@function
.align 16
xor128_encrypt_n_pad:
-.cfi_startproc
+.cfi_startproc
subq %rdx,%rsi
subq %rdx,%rdi
movq %rcx,%r10
@@ -3527,14 +3527,14 @@ xor128_encrypt_n_pad:
.Ldone_enc:
movq %rdx,%rax
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad
.globl xor128_decrypt_n_pad
.type xor128_decrypt_n_pad,@function
.align 16
xor128_decrypt_n_pad:
-.cfi_startproc
+.cfi_startproc
subq %rdx,%rsi
subq %rdx,%rdi
movq %rcx,%r10
@@ -3580,5 +3580,5 @@ xor128_decrypt_n_pad:
.Ldone_dec:
movq %rdx,%rax
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad
diff --git a/contrib/libs/openssl/asm/linux/crypto/rc4/rc4-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/rc4/rc4-x86_64.s
index d1d1eece70..6d64f841d1 100644
--- a/contrib/libs/openssl/asm/linux/crypto/rc4/rc4-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/rc4/rc4-x86_64.s
@@ -4,9 +4,9 @@
.globl RC4
.type RC4,@function
.align 16
-RC4:
-.cfi_startproc
- orq %rsi,%rsi
+RC4:
+.cfi_startproc
+ orq %rsi,%rsi
jne .Lentry
.byte 0xf3,0xc3
.Lentry:
@@ -534,7 +534,7 @@ RC4:
.type RC4_set_key,@function
.align 16
RC4_set_key:
-.cfi_startproc
+.cfi_startproc
leaq 8(%rdi),%rdi
leaq (%rdx,%rsi,1),%rdx
negq %rsi
@@ -601,14 +601,14 @@ RC4_set_key:
movl %eax,-8(%rdi)
movl %eax,-4(%rdi)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size RC4_set_key,.-RC4_set_key
.globl RC4_options
.type RC4_options,@function
.align 16
RC4_options:
-.cfi_startproc
+.cfi_startproc
leaq .Lopts(%rip),%rax
movl OPENSSL_ia32cap_P(%rip),%edx
btl $20,%edx
@@ -621,7 +621,7 @@ RC4_options:
addq $12,%rax
.Ldone:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.align 64
.Lopts:
.byte 114,99,52,40,56,120,44,105,110,116,41,0
diff --git a/contrib/libs/openssl/asm/linux/crypto/sha/keccak1600-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/sha/keccak1600-x86_64.s
index 11f26e933d..6e90445a67 100644
--- a/contrib/libs/openssl/asm/linux/crypto/sha/keccak1600-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/sha/keccak1600-x86_64.s
@@ -3,7 +3,7 @@
.type __KeccakF1600,@function
.align 32
__KeccakF1600:
-.cfi_startproc
+.cfi_startproc
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
@@ -256,7 +256,7 @@ __KeccakF1600:
leaq -192(%r15),%r15
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size __KeccakF1600,.-__KeccakF1600
.type KeccakF1600,@function
diff --git a/contrib/libs/openssl/asm/linux/crypto/sha/sha1-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/sha/sha1-x86_64.s
index d4efc7206f..daa461635e 100644
--- a/contrib/libs/openssl/asm/linux/crypto/sha/sha1-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/sha/sha1-x86_64.s
@@ -1421,7 +1421,7 @@ _shaext_shortcut:
pshufd $27,%xmm1,%xmm1
movdqu %xmm0,(%rdi)
movd %xmm1,16(%rdi)
- .byte 0xf3,0xc3
+ .byte 0xf3,0xc3
.cfi_endproc
.size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext
.type sha1_block_data_order_ssse3,@function
diff --git a/contrib/libs/openssl/asm/linux/crypto/sha/sha256-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/sha/sha256-x86_64.s
index a7b60900fd..3ee0605cc7 100644
--- a/contrib/libs/openssl/asm/linux/crypto/sha/sha256-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/sha/sha256-x86_64.s
@@ -1775,7 +1775,7 @@ K256:
.align 64
sha256_block_data_order_shaext:
_shaext_shortcut:
-.cfi_startproc
+.cfi_startproc
leaq K256+128(%rip),%rcx
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
@@ -1978,7 +1978,7 @@ _shaext_shortcut:
movdqu %xmm1,(%rdi)
movdqu %xmm2,16(%rdi)
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext
.type sha256_block_data_order_ssse3,@function
.align 64
@@ -4240,15 +4240,15 @@ sha256_block_data_order_avx2:
vmovdqa %ymm4,0(%rsp)
xorl %r14d,%r14d
vmovdqa %ymm5,32(%rsp)
-
- movq 88(%rsp),%rdi
-.cfi_def_cfa %rdi,8
+
+ movq 88(%rsp),%rdi
+.cfi_def_cfa %rdi,8
leaq -64(%rsp),%rsp
-
-
-
- movq %rdi,-8(%rsp)
-.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
+
+
+
+ movq %rdi,-8(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
movl %ebx,%edi
vmovdqa %ymm6,0(%rsp)
xorl %ecx,%edi
@@ -4260,12 +4260,12 @@ sha256_block_data_order_avx2:
.align 16
.Lavx2_00_47:
leaq -64(%rsp),%rsp
-.cfi_escape 0x0f,0x05,0x77,0x38,0x06,0x23,0x08
-
- pushq 64-8(%rsp)
-.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
- leaq 8(%rsp),%rsp
-.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
+.cfi_escape 0x0f,0x05,0x77,0x38,0x06,0x23,0x08
+
+ pushq 64-8(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
+ leaq 8(%rsp),%rsp
+.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
vpalignr $4,%ymm0,%ymm1,%ymm4
addl 0+128(%rsp),%r11d
andl %r8d,%r12d
@@ -4521,12 +4521,12 @@ sha256_block_data_order_avx2:
movl %r9d,%r12d
vmovdqa %ymm6,32(%rsp)
leaq -64(%rsp),%rsp
-.cfi_escape 0x0f,0x05,0x77,0x38,0x06,0x23,0x08
-
- pushq 64-8(%rsp)
-.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
- leaq 8(%rsp),%rsp
-.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
+.cfi_escape 0x0f,0x05,0x77,0x38,0x06,0x23,0x08
+
+ pushq 64-8(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
+ leaq 8(%rsp),%rsp
+.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
vpalignr $4,%ymm2,%ymm3,%ymm4
addl 0+128(%rsp),%r11d
andl %r8d,%r12d
@@ -5402,8 +5402,8 @@ sha256_block_data_order_avx2:
leaq 448(%rsp),%rsp
-.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
-
+.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
+
addl 0(%rdi),%eax
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
@@ -5429,11 +5429,11 @@ sha256_block_data_order_avx2:
jbe .Loop_avx2
leaq (%rsp),%rbp
-
-.cfi_escape 0x0f,0x06,0x76,0xd8,0x00,0x06,0x23,0x08
-
+
+.cfi_escape 0x0f,0x06,0x76,0xd8,0x00,0x06,0x23,0x08
+
.Ldone_avx2:
- movq 88(%rbp),%rsi
+ movq 88(%rbp),%rsi
.cfi_def_cfa %rsi,8
vzeroupper
movq -48(%rsi),%r15
diff --git a/contrib/libs/openssl/asm/linux/crypto/sha/sha512-x86_64.s b/contrib/libs/openssl/asm/linux/crypto/sha/sha512-x86_64.s
index 939f1ca71c..89874b7deb 100644
--- a/contrib/libs/openssl/asm/linux/crypto/sha/sha512-x86_64.s
+++ b/contrib/libs/openssl/asm/linux/crypto/sha/sha512-x86_64.s
@@ -4165,15 +4165,15 @@ sha512_block_data_order_avx2:
vmovdqa %ymm10,64(%rsp)
vpaddq 64(%rbp),%ymm6,%ymm10
vmovdqa %ymm11,96(%rsp)
-
- movq 152(%rsp),%rdi
-.cfi_def_cfa %rdi,8
+
+ movq 152(%rsp),%rdi
+.cfi_def_cfa %rdi,8
leaq -128(%rsp),%rsp
-
-
-
- movq %rdi,-8(%rsp)
-.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
+
+
+
+ movq %rdi,-8(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
vpaddq 96(%rbp),%ymm7,%ymm11
vmovdqa %ymm8,0(%rsp)
xorq %r14,%r14
@@ -4189,12 +4189,12 @@ sha512_block_data_order_avx2:
.align 16
.Lavx2_00_47:
leaq -128(%rsp),%rsp
-.cfi_escape 0x0f,0x06,0x77,0xf8,0x00,0x06,0x23,0x08
-
- pushq 128-8(%rsp)
-.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
- leaq 8(%rsp),%rsp
-.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
+.cfi_escape 0x0f,0x06,0x77,0xf8,0x00,0x06,0x23,0x08
+
+ pushq 128-8(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
+ leaq 8(%rsp),%rsp
+.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
vpalignr $8,%ymm0,%ymm1,%ymm8
addq 0+256(%rsp),%r11
andq %r8,%r12
@@ -4488,12 +4488,12 @@ sha512_block_data_order_avx2:
movq %r9,%r12
vmovdqa %ymm10,96(%rsp)
leaq -128(%rsp),%rsp
-.cfi_escape 0x0f,0x06,0x77,0xf8,0x00,0x06,0x23,0x08
-
- pushq 128-8(%rsp)
-.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
- leaq 8(%rsp),%rsp
-.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
+.cfi_escape 0x0f,0x06,0x77,0xf8,0x00,0x06,0x23,0x08
+
+ pushq 128-8(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
+ leaq 8(%rsp),%rsp
+.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
vpalignr $8,%ymm4,%ymm5,%ymm8
addq 0+256(%rsp),%r11
andq %r8,%r12
@@ -5407,8 +5407,8 @@ sha512_block_data_order_avx2:
leaq 1152(%rsp),%rsp
-.cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08
-
+.cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08
+
addq 0(%rdi),%rax
addq 8(%rdi),%rbx
addq 16(%rdi),%rcx
@@ -5434,11 +5434,11 @@ sha512_block_data_order_avx2:
jbe .Loop_avx2
leaq (%rsp),%rbp
-
-.cfi_escape 0x0f,0x06,0x76,0x98,0x01,0x06,0x23,0x08
-
+
+.cfi_escape 0x0f,0x06,0x76,0x98,0x01,0x06,0x23,0x08
+
.Ldone_avx2:
- movq 152(%rbp),%rsi
+ movq 152(%rbp),%rsi
.cfi_def_cfa %rsi,8
vzeroupper
movq -48(%rsi),%r15
diff --git a/contrib/libs/openssl/asm/linux/crypto/x86_64cpuid.s b/contrib/libs/openssl/asm/linux/crypto/x86_64cpuid.s
index 748e6d161f..425245734b 100644
--- a/contrib/libs/openssl/asm/linux/crypto/x86_64cpuid.s
+++ b/contrib/libs/openssl/asm/linux/crypto/x86_64cpuid.s
@@ -12,7 +12,7 @@
.type OPENSSL_atomic_add,@function
.align 16
OPENSSL_atomic_add:
-.cfi_startproc
+.cfi_startproc
movl (%rdi),%eax
.Lspin:
leaq (%rsi,%rax,1),%r8
.byte 0xf0
@@ -21,19 +21,19 @@ OPENSSL_atomic_add:
movl %r8d,%eax
.byte 0x48,0x98
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size OPENSSL_atomic_add,.-OPENSSL_atomic_add
.globl OPENSSL_rdtsc
.type OPENSSL_rdtsc,@function
.align 16
OPENSSL_rdtsc:
-.cfi_startproc
+.cfi_startproc
rdtsc
shlq $32,%rdx
orq %rdx,%rax
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size OPENSSL_rdtsc,.-OPENSSL_rdtsc
.globl OPENSSL_ia32_cpuid
@@ -209,7 +209,7 @@ OPENSSL_ia32_cpuid:
.type OPENSSL_cleanse,@function
.align 16
OPENSSL_cleanse:
-.cfi_startproc
+.cfi_startproc
xorq %rax,%rax
cmpq $15,%rsi
jae .Lot
@@ -239,14 +239,14 @@ OPENSSL_cleanse:
cmpq $0,%rsi
jne .Little
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size OPENSSL_cleanse,.-OPENSSL_cleanse
.globl CRYPTO_memcmp
.type CRYPTO_memcmp,@function
.align 16
CRYPTO_memcmp:
-.cfi_startproc
+.cfi_startproc
xorq %rax,%rax
xorq %r10,%r10
cmpq $0,%rdx
@@ -275,13 +275,13 @@ CRYPTO_memcmp:
shrq $63,%rax
.Lno_data:
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size CRYPTO_memcmp,.-CRYPTO_memcmp
.globl OPENSSL_wipe_cpu
.type OPENSSL_wipe_cpu,@function
.align 16
OPENSSL_wipe_cpu:
-.cfi_startproc
+.cfi_startproc
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
@@ -308,13 +308,13 @@ OPENSSL_wipe_cpu:
xorq %r11,%r11
leaq 8(%rsp),%rax
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu
.globl OPENSSL_instrument_bus
.type OPENSSL_instrument_bus,@function
.align 16
OPENSSL_instrument_bus:
-.cfi_startproc
+.cfi_startproc
movq %rdi,%r10
movq %rsi,%rcx
movq %rsi,%r11
@@ -341,14 +341,14 @@ OPENSSL_instrument_bus:
movq %r11,%rax
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size OPENSSL_instrument_bus,.-OPENSSL_instrument_bus
.globl OPENSSL_instrument_bus2
.type OPENSSL_instrument_bus2,@function
.align 16
OPENSSL_instrument_bus2:
-.cfi_startproc
+.cfi_startproc
movq %rdi,%r10
movq %rsi,%rcx
movq %rdx,%r11
@@ -391,13 +391,13 @@ OPENSSL_instrument_bus2:
movq 8(%rsp),%rax
subq %rcx,%rax
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size OPENSSL_instrument_bus2,.-OPENSSL_instrument_bus2
.globl OPENSSL_ia32_rdrand_bytes
.type OPENSSL_ia32_rdrand_bytes,@function
.align 16
OPENSSL_ia32_rdrand_bytes:
-.cfi_startproc
+.cfi_startproc
xorq %rax,%rax
cmpq $0,%rsi
je .Ldone_rdrand_bytes
@@ -434,13 +434,13 @@ OPENSSL_ia32_rdrand_bytes:
.Ldone_rdrand_bytes:
xorq %r10,%r10
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size OPENSSL_ia32_rdrand_bytes,.-OPENSSL_ia32_rdrand_bytes
.globl OPENSSL_ia32_rdseed_bytes
.type OPENSSL_ia32_rdseed_bytes,@function
.align 16
OPENSSL_ia32_rdseed_bytes:
-.cfi_startproc
+.cfi_startproc
xorq %rax,%rax
cmpq $0,%rsi
je .Ldone_rdseed_bytes
@@ -477,5 +477,5 @@ OPENSSL_ia32_rdseed_bytes:
.Ldone_rdseed_bytes:
xorq %r10,%r10
.byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
.size OPENSSL_ia32_rdseed_bytes,.-OPENSSL_ia32_rdseed_bytes