author    mixa108 <mixa108@yandex-team.com>  2023-11-16 19:41:57 +0300
committer mixa108 <mixa108@yandex-team.com>  2023-11-16 20:39:42 +0300
commit    177b2aacb85a58acd19ca2e22d79c6030cd4c68b (patch)
tree      948ffeb9b1aa2a741b4a89fbecdffee6460f1031 /contrib/go/_std_1.21/src/crypto/sha256
parent    b098c151d43da3ece1440d49d69bf76061429aeb (diff)
download  ydb-177b2aacb85a58acd19ca2e22d79c6030cd4c68b.tar.gz
bump go ver 1.21.3
bump go to 1.21.3
build go.conf Golang 1.20.6 -> 1.21.3
init Golang 1.20.6 -> 1.21.3: copy blame
Diffstat (limited to 'contrib/go/_std_1.21/src/crypto/sha256')
-rw-r--r--  contrib/go/_std_1.21/src/crypto/sha256/sha256.go                |  275
-rw-r--r--  contrib/go/_std_1.21/src/crypto/sha256/sha256block.go           |  128
-rw-r--r--  contrib/go/_std_1.21/src/crypto/sha256/sha256block_amd64.go     |   10
-rw-r--r--  contrib/go/_std_1.21/src/crypto/sha256/sha256block_amd64.s      | 1173
-rw-r--r--  contrib/go/_std_1.21/src/crypto/sha256/sha256block_arm64.go     |   21
-rw-r--r--  contrib/go/_std_1.21/src/crypto/sha256/sha256block_arm64.s      |  119
-rw-r--r--  contrib/go/_std_1.21/src/crypto/sha256/sha256block_decl.go      |   10
-rw-r--r--  contrib/go/_std_1.21/src/crypto/sha256/ya.make                  |   30
8 files changed, 1766 insertions, 0 deletions
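
For orientation, a minimal usage sketch of the package this commit vendors (illustrative only, not part of the diff):

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// One-shot: Sum256 returns a [32]byte digest.
	sum := sha256.Sum256([]byte("hello world"))
	fmt.Printf("%x\n", sum)

	// Streaming: the hash.Hash interface accepts incremental writes.
	h := sha256.New()
	h.Write([]byte("hello "))
	h.Write([]byte("world"))
	fmt.Printf("%x\n", h.Sum(nil)) // same digest as above
}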
diff --git a/contrib/go/_std_1.21/src/crypto/sha256/sha256.go b/contrib/go/_std_1.21/src/crypto/sha256/sha256.go
new file mode 100644
index 0000000000..2deafbc9fc
--- /dev/null
+++ b/contrib/go/_std_1.21/src/crypto/sha256/sha256.go
@@ -0,0 +1,275 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sha256 implements the SHA224 and SHA256 hash algorithms as defined
+// in FIPS 180-4.
+package sha256
+
+import (
+ "crypto"
+ "crypto/internal/boring"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+func init() {
+ crypto.RegisterHash(crypto.SHA224, New224)
+ crypto.RegisterHash(crypto.SHA256, New)
+}
+
+// The size of a SHA256 checksum in bytes.
+const Size = 32
+
+// The size of a SHA224 checksum in bytes.
+const Size224 = 28
+
+// The blocksize of SHA256 and SHA224 in bytes.
+const BlockSize = 64
+
+const (
+ chunk = 64
+ init0 = 0x6A09E667
+ init1 = 0xBB67AE85
+ init2 = 0x3C6EF372
+ init3 = 0xA54FF53A
+ init4 = 0x510E527F
+ init5 = 0x9B05688C
+ init6 = 0x1F83D9AB
+ init7 = 0x5BE0CD19
+ init0_224 = 0xC1059ED8
+ init1_224 = 0x367CD507
+ init2_224 = 0x3070DD17
+ init3_224 = 0xF70E5939
+ init4_224 = 0xFFC00B31
+ init5_224 = 0x68581511
+ init6_224 = 0x64F98FA7
+ init7_224 = 0xBEFA4FA4
+)
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+ h [8]uint32
+ x [chunk]byte
+ nx int
+ len uint64
+ is224 bool // mark if this digest is SHA-224
+}
+
+const (
+ magic224 = "sha\x02"
+ magic256 = "sha\x03"
+ marshaledSize = len(magic256) + 8*4 + chunk + 8
+)
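+
+For reference, marshaledSize works out to 4 + 32 + 64 + 8 = 108 bytes: the magic prefix, the eight 32-bit words of h, the 64-byte block buffer, and the 64-bit length. A one-liner to check the arithmetic (illustrative, not part of the diff):
+
package main

import "fmt"

func main() {
	const chunk = 64
	const marshaledSize = len("sha\x03") + 8*4 + chunk + 8
	fmt.Println(marshaledSize) // 108
}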
+
+func (d *digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ if d.is224 {
+ b = append(b, magic224...)
+ } else {
+ b = append(b, magic256...)
+ }
+ b = binary.BigEndian.AppendUint32(b, d.h[0])
+ b = binary.BigEndian.AppendUint32(b, d.h[1])
+ b = binary.BigEndian.AppendUint32(b, d.h[2])
+ b = binary.BigEndian.AppendUint32(b, d.h[3])
+ b = binary.BigEndian.AppendUint32(b, d.h[4])
+ b = binary.BigEndian.AppendUint32(b, d.h[5])
+ b = binary.BigEndian.AppendUint32(b, d.h[6])
+ b = binary.BigEndian.AppendUint32(b, d.h[7])
+ b = append(b, d.x[:d.nx]...)
+ b = b[:len(b)+len(d.x)-d.nx] // already zero
+ b = binary.BigEndian.AppendUint64(b, d.len)
+ return b, nil
+}
+
+func (d *digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic224) || (d.is224 && string(b[:len(magic224)]) != magic224) || (!d.is224 && string(b[:len(magic256)]) != magic256) {
+ return errors.New("crypto/sha256: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("crypto/sha256: invalid hash state size")
+ }
+ b = b[len(magic224):]
+ b, d.h[0] = consumeUint32(b)
+ b, d.h[1] = consumeUint32(b)
+ b, d.h[2] = consumeUint32(b)
+ b, d.h[3] = consumeUint32(b)
+ b, d.h[4] = consumeUint32(b)
+ b, d.h[5] = consumeUint32(b)
+ b, d.h[6] = consumeUint32(b)
+ b, d.h[7] = consumeUint32(b)
+ b = b[copy(d.x[:], b):]
+ b, d.len = consumeUint64(b)
+ d.nx = int(d.len % chunk)
+ return nil
+}
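
The magic prefixes and fixed marshaledSize exist so a partially written hash can be checkpointed and resumed. A sketch of that round trip (illustrative, not part of the diff; the assertions to the encoding interfaces are how these unexported methods are reached from outside the package):

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding"
	"fmt"
)

func main() {
	h1 := sha256.New()
	h1.Write([]byte("part one, "))

	// Checkpoint: magic prefix, the eight h words, buffered block, byte length.
	state, err := h1.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Resume in a fresh hash and finish the message in both.
	h2 := sha256.New()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}
	h1.Write([]byte("part two"))
	h2.Write([]byte("part two"))
	fmt.Println(bytes.Equal(h1.Sum(nil), h2.Sum(nil))) // true
}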
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ _ = b[7]
+ x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ return b[8:], x
+}
+
+func consumeUint32(b []byte) ([]byte, uint32) {
+ _ = b[3]
+ x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ return b[4:], x
+}
+
+func (d *digest) Reset() {
+ if !d.is224 {
+ d.h[0] = init0
+ d.h[1] = init1
+ d.h[2] = init2
+ d.h[3] = init3
+ d.h[4] = init4
+ d.h[5] = init5
+ d.h[6] = init6
+ d.h[7] = init7
+ } else {
+ d.h[0] = init0_224
+ d.h[1] = init1_224
+ d.h[2] = init2_224
+ d.h[3] = init3_224
+ d.h[4] = init4_224
+ d.h[5] = init5_224
+ d.h[6] = init6_224
+ d.h[7] = init7_224
+ }
+ d.nx = 0
+ d.len = 0
+}
+
+// New returns a new hash.Hash computing the SHA256 checksum. The Hash
+// also implements encoding.BinaryMarshaler and
+// encoding.BinaryUnmarshaler to marshal and unmarshal the internal
+// state of the hash.
+func New() hash.Hash {
+ if boring.Enabled {
+ return boring.NewSHA256()
+ }
+ d := new(digest)
+ d.Reset()
+ return d
+}
+
+// New224 returns a new hash.Hash computing the SHA224 checksum.
+func New224() hash.Hash {
+ if boring.Enabled {
+ return boring.NewSHA224()
+ }
+ d := new(digest)
+ d.is224 = true
+ d.Reset()
+ return d
+}
+
+func (d *digest) Size() int {
+ if !d.is224 {
+ return Size
+ }
+ return Size224
+}
+
+func (d *digest) BlockSize() int { return BlockSize }
+
+func (d *digest) Write(p []byte) (nn int, err error) {
+ boring.Unreachable()
+ nn = len(p)
+ d.len += uint64(nn)
+ if d.nx > 0 {
+ n := copy(d.x[d.nx:], p)
+ d.nx += n
+ if d.nx == chunk {
+ block(d, d.x[:])
+ d.nx = 0
+ }
+ p = p[n:]
+ }
+ if len(p) >= chunk {
+ n := len(p) &^ (chunk - 1)
+ block(d, p[:n])
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ d.nx = copy(d.x[:], p)
+ }
+ return
+}
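
The expression len(p) &^ (chunk - 1) rounds the remaining input down to a whole number of 64-byte blocks; anything left over is buffered in d.x for the next call. A quick sketch of the bit trick (illustrative, not part of the diff):

package main

import "fmt"

func main() {
	const chunk = 64
	for _, n := range []int{0, 1, 63, 64, 65, 200} {
		// n &^ (chunk-1) clears the low six bits, i.e. n - n%chunk for n >= 0.
		fmt.Println(n&^(chunk-1) == n-n%chunk) // true every time
	}
}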
+
+func (d *digest) Sum(in []byte) []byte {
+ boring.Unreachable()
+ // Make a copy of d so that caller can keep writing and summing.
+ d0 := *d
+ hash := d0.checkSum()
+ if d0.is224 {
+ return append(in, hash[:Size224]...)
+ }
+ return append(in, hash[:]...)
+}
+
+func (d *digest) checkSum() [Size]byte {
+ len := d.len
+ // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
+ var tmp [64 + 8]byte // padding + length buffer
+ tmp[0] = 0x80
+ var t uint64
+ if len%64 < 56 {
+ t = 56 - len%64
+ } else {
+ t = 64 + 56 - len%64
+ }
+
+ // Length in bits.
+ len <<= 3
+ padlen := tmp[:t+8]
+ binary.BigEndian.PutUint64(padlen[t+0:], len)
+ d.Write(padlen)
+
+ if d.nx != 0 {
+ panic("d.nx != 0")
+ }
+
+ var digest [Size]byte
+
+ binary.BigEndian.PutUint32(digest[0:], d.h[0])
+ binary.BigEndian.PutUint32(digest[4:], d.h[1])
+ binary.BigEndian.PutUint32(digest[8:], d.h[2])
+ binary.BigEndian.PutUint32(digest[12:], d.h[3])
+ binary.BigEndian.PutUint32(digest[16:], d.h[4])
+ binary.BigEndian.PutUint32(digest[20:], d.h[5])
+ binary.BigEndian.PutUint32(digest[24:], d.h[6])
+ if !d.is224 {
+ binary.BigEndian.PutUint32(digest[28:], d.h[7])
+ }
+
+ return digest
+}
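
The branch on len%64 picks the pad length t so that the data written so far, the 0x80 byte plus zero fill, and the 8-byte big-endian bit length together fill whole 64-byte blocks. A sketch that checks the invariant for every residue (illustrative, not part of the diff):

package main

import "fmt"

func main() {
	ok := true
	for l := uint64(0); l < 64; l++ { // l stands in for d.len % 64
		var t uint64
		if l%64 < 56 {
			t = 56 - l%64
		} else {
			t = 64 + 56 - l%64
		}
		// data so far (l) + 0x80/zero pad (t) + 8-byte length fills whole blocks
		ok = ok && (l+t+8)%64 == 0
	}
	fmt.Println(ok) // true
}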
+
+// Sum256 returns the SHA256 checksum of the data.
+func Sum256(data []byte) [Size]byte {
+ if boring.Enabled {
+ return boring.SHA256(data)
+ }
+ var d digest
+ d.Reset()
+ d.Write(data)
+ return d.checkSum()
+}
+
+// Sum224 returns the SHA224 checksum of the data.
+func Sum224(data []byte) [Size224]byte {
+ if boring.Enabled {
+ return boring.SHA224(data)
+ }
+ var d digest
+ d.is224 = true
+ d.Reset()
+ d.Write(data)
+ sum := d.checkSum()
+ ap := (*[Size224]byte)(sum[:])
+ return *ap
+}
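
Sum224 relies on the Go 1.17+ conversion from a slice to an array pointer, (*[Size224]byte)(sum[:]), to truncate to 28 bytes without a copy loop. The idiom in isolation (illustrative, not part of the diff):

package main

import "fmt"

func main() {
	full := [32]byte{0: 0xaa, 27: 0xbb, 31: 0xcc}
	// Converting a slice to *[28]byte panics if len < 28; here len is 32.
	head := *(*[28]byte)(full[:])
	fmt.Println(head[0] == 0xaa, head[27] == 0xbb) // true true
}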
diff --git a/contrib/go/_std_1.21/src/crypto/sha256/sha256block.go b/contrib/go/_std_1.21/src/crypto/sha256/sha256block.go
new file mode 100644
index 0000000000..bd2f9da93c
--- /dev/null
+++ b/contrib/go/_std_1.21/src/crypto/sha256/sha256block.go
@@ -0,0 +1,128 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// SHA256 block step.
+// In its own file so that a faster assembly or C version
+// can be substituted easily.
+
+package sha256
+
+import "math/bits"
+
+var _K = []uint32{
+ 0x428a2f98,
+ 0x71374491,
+ 0xb5c0fbcf,
+ 0xe9b5dba5,
+ 0x3956c25b,
+ 0x59f111f1,
+ 0x923f82a4,
+ 0xab1c5ed5,
+ 0xd807aa98,
+ 0x12835b01,
+ 0x243185be,
+ 0x550c7dc3,
+ 0x72be5d74,
+ 0x80deb1fe,
+ 0x9bdc06a7,
+ 0xc19bf174,
+ 0xe49b69c1,
+ 0xefbe4786,
+ 0x0fc19dc6,
+ 0x240ca1cc,
+ 0x2de92c6f,
+ 0x4a7484aa,
+ 0x5cb0a9dc,
+ 0x76f988da,
+ 0x983e5152,
+ 0xa831c66d,
+ 0xb00327c8,
+ 0xbf597fc7,
+ 0xc6e00bf3,
+ 0xd5a79147,
+ 0x06ca6351,
+ 0x14292967,
+ 0x27b70a85,
+ 0x2e1b2138,
+ 0x4d2c6dfc,
+ 0x53380d13,
+ 0x650a7354,
+ 0x766a0abb,
+ 0x81c2c92e,
+ 0x92722c85,
+ 0xa2bfe8a1,
+ 0xa81a664b,
+ 0xc24b8b70,
+ 0xc76c51a3,
+ 0xd192e819,
+ 0xd6990624,
+ 0xf40e3585,
+ 0x106aa070,
+ 0x19a4c116,
+ 0x1e376c08,
+ 0x2748774c,
+ 0x34b0bcb5,
+ 0x391c0cb3,
+ 0x4ed8aa4a,
+ 0x5b9cca4f,
+ 0x682e6ff3,
+ 0x748f82ee,
+ 0x78a5636f,
+ 0x84c87814,
+ 0x8cc70208,
+ 0x90befffa,
+ 0xa4506ceb,
+ 0xbef9a3f7,
+ 0xc67178f2,
+}
+
+func blockGeneric(dig *digest, p []byte) {
+ var w [64]uint32
+ h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
+ for len(p) >= chunk {
+ // Can interlace the computation of w with the
+ // rounds below if needed for speed.
+ for i := 0; i < 16; i++ {
+ j := i * 4
+ w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
+ }
+ for i := 16; i < 64; i++ {
+ v1 := w[i-2]
+ t1 := (bits.RotateLeft32(v1, -17)) ^ (bits.RotateLeft32(v1, -19)) ^ (v1 >> 10)
+ v2 := w[i-15]
+ t2 := (bits.RotateLeft32(v2, -7)) ^ (bits.RotateLeft32(v2, -18)) ^ (v2 >> 3)
+ w[i] = t1 + w[i-7] + t2 + w[i-16]
+ }
+
+ a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7
+
+ for i := 0; i < 64; i++ {
+ t1 := h + ((bits.RotateLeft32(e, -6)) ^ (bits.RotateLeft32(e, -11)) ^ (bits.RotateLeft32(e, -25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i]
+
+ t2 := ((bits.RotateLeft32(a, -2)) ^ (bits.RotateLeft32(a, -13)) ^ (bits.RotateLeft32(a, -22))) + ((a & b) ^ (a & c) ^ (b & c))
+
+ h = g
+ g = f
+ f = e
+ e = d + t1
+ d = c
+ c = b
+ b = a
+ a = t1 + t2
+ }
+
+ h0 += a
+ h1 += b
+ h2 += c
+ h3 += d
+ h4 += e
+ h5 += f
+ h6 += g
+ h7 += h
+
+ p = p[chunk:]
+ }
+
+ dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7
+}
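
blockGeneric spells the FIPS 180-4 right rotations as math/bits.RotateLeft32 with a negative count, since rotating left by -n equals rotating right by n. A sketch of the equivalence against a hand-written rotr (illustrative, not part of the diff):

package main

import (
	"fmt"
	"math/bits"
)

// rotr is ROTR(n, x) from FIPS 180-4, written out by hand (valid for 0 < n < 32).
func rotr(x uint32, n uint) uint32 { return x>>n | x<<(32-n) }

func main() {
	x := uint32(0x6A09E667)
	for _, n := range []uint{2, 6, 7, 11, 13, 17, 18, 19, 22, 25} {
		fmt.Println(bits.RotateLeft32(x, -int(n)) == rotr(x, n)) // true
	}
}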
diff --git a/contrib/go/_std_1.21/src/crypto/sha256/sha256block_amd64.go b/contrib/go/_std_1.21/src/crypto/sha256/sha256block_amd64.go
new file mode 100644
index 0000000000..b5d2c9b574
--- /dev/null
+++ b/contrib/go/_std_1.21/src/crypto/sha256/sha256block_amd64.go
@@ -0,0 +1,10 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha256
+
+import "internal/cpu"
+
+var useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2
+var useSHA = useAVX2 && cpu.X86.HasSHA
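
internal/cpu is importable only within the standard library; code elsewhere that wants the same feature gate typically uses golang.org/x/sys/cpu. A hedged sketch of the AVX2 half of the check (external module, not part of the diff; the SHA-extensions bit would need a separate probe):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// Mirrors the stdlib's useAVX2 gate; internal/cpu is off-limits outside
// the standard library, so x/sys/cpu is the usual substitute.
var useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2

func main() {
	fmt.Println("AVX2+BMI2 available:", useAVX2)
}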
diff --git a/contrib/go/_std_1.21/src/crypto/sha256/sha256block_amd64.s b/contrib/go/_std_1.21/src/crypto/sha256/sha256block_amd64.s
new file mode 100644
index 0000000000..bbde6285d1
--- /dev/null
+++ b/contrib/go/_std_1.21/src/crypto/sha256/sha256block_amd64.s
@@ -0,0 +1,1173 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// SHA256 block routine. See sha256block.go for Go equivalent.
+//
+// The algorithm is detailed in FIPS 180-4:
+//
+// https://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf
+
+// The avx2-version is described in an Intel White-Paper:
+// "Fast SHA-256 Implementations on Intel Architecture Processors"
+// To find it, surf to http://www.intel.com/p/en_US/embedded
+// and search for that title.
+// AVX2 version by Intel, same algorithm as code in Linux kernel:
+// https://github.com/torvalds/linux/blob/master/arch/x86/crypto/sha256-avx2-asm.S
+// by
+// James Guilford <james.guilford@intel.com>
+// Kirk Yap <kirk.s.yap@intel.com>
+// Tim Chen <tim.c.chen@linux.intel.com>
+
+// Wt = Mt; for 0 <= t <= 15
+// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63
+//
+// a = H0
+// b = H1
+// c = H2
+// d = H3
+// e = H4
+// f = H5
+// g = H6
+// h = H7
+//
+// for t = 0 to 63 {
+// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt
+// T2 = BIGSIGMA0(a) + Maj(a,b,c)
+// h = g
+// g = f
+// f = e
+// e = d + T1
+// d = c
+// c = b
+// b = a
+// a = T1 + T2
+// }
+//
+// H0 = a + H0
+// H1 = b + H1
+// H2 = c + H2
+// H3 = d + H3
+// H4 = e + H4
+// H5 = f + H5
+// H6 = g + H6
+// H7 = h + H7
+
+// Wt = Mt; for 0 <= t <= 15
+#define MSGSCHEDULE0(index) \
+ MOVL (index*4)(SI), AX; \
+ BSWAPL AX; \
+ MOVL AX, (index*4)(BP)
+
+// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63
+// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x)
+// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x)
+#define MSGSCHEDULE1(index) \
+ MOVL ((index-2)*4)(BP), AX; \
+ MOVL AX, CX; \
+ RORL $17, AX; \
+ MOVL CX, DX; \
+ RORL $19, CX; \
+ SHRL $10, DX; \
+ MOVL ((index-15)*4)(BP), BX; \
+ XORL CX, AX; \
+ MOVL BX, CX; \
+ XORL DX, AX; \
+ RORL $7, BX; \
+ MOVL CX, DX; \
+ SHRL $3, DX; \
+ RORL $18, CX; \
+ ADDL ((index-7)*4)(BP), AX; \
+ XORL CX, BX; \
+ XORL DX, BX; \
+ ADDL ((index-16)*4)(BP), BX; \
+ ADDL BX, AX; \
+ MOVL AX, ((index)*4)(BP)
+
+// Calculate T1 in AX - uses AX, CX and DX registers.
+// h is also used as an accumulator. Wt is passed in AX.
+// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt
+// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x)
+// Ch(x, y, z) = (x AND y) XOR (NOT x AND z)
+#define SHA256T1(const, e, f, g, h) \
+ ADDL AX, h; \
+ MOVL e, AX; \
+ ADDL $const, h; \
+ MOVL e, CX; \
+ RORL $6, AX; \
+ MOVL e, DX; \
+ RORL $11, CX; \
+ XORL CX, AX; \
+ MOVL e, CX; \
+ RORL $25, DX; \
+ ANDL f, CX; \
+ XORL AX, DX; \
+ MOVL e, AX; \
+ NOTL AX; \
+ ADDL DX, h; \
+ ANDL g, AX; \
+ XORL CX, AX; \
+ ADDL h, AX
+
+// Calculate T2 in BX - uses BX, CX, DX and DI registers.
+// T2 = BIGSIGMA0(a) + Maj(a, b, c)
+// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x)
+// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z)
+#define SHA256T2(a, b, c) \
+ MOVL a, DI; \
+ MOVL c, BX; \
+ RORL $2, DI; \
+ MOVL a, DX; \
+ ANDL b, BX; \
+ RORL $13, DX; \
+ MOVL a, CX; \
+ ANDL c, CX; \
+ XORL DX, DI; \
+ XORL CX, BX; \
+ MOVL a, DX; \
+ MOVL b, CX; \
+ RORL $22, DX; \
+ ANDL a, CX; \
+ XORL CX, BX; \
+ XORL DX, DI; \
+ ADDL DI, BX
+
+// Calculate T1 and T2, then e = d + T1 and a = T1 + T2.
+// The values for e and a are stored in d and h, ready for rotation.
+#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \
+ SHA256T1(const, e, f, g, h); \
+ SHA256T2(a, b, c); \
+ MOVL BX, h; \
+ ADDL AX, d; \
+ ADDL AX, h
+
+#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \
+ MSGSCHEDULE0(index); \
+ SHA256ROUND(index, const, a, b, c, d, e, f, g, h)
+
+#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \
+ MSGSCHEDULE1(index); \
+ SHA256ROUND(index, const, a, b, c, d, e, f, g, h)
+
+
+// Definitions for AVX2 version
+
+// addm (mem), reg
+// Add reg to mem using reg-mem add and store
+#define addm(P1, P2) \
+ ADDL P2, P1; \
+ MOVL P1, P2
+
+#define XDWORD0 Y4
+#define XDWORD1 Y5
+#define XDWORD2 Y6
+#define XDWORD3 Y7
+
+#define XWORD0 X4
+#define XWORD1 X5
+#define XWORD2 X6
+#define XWORD3 X7
+
+#define XTMP0 Y0
+#define XTMP1 Y1
+#define XTMP2 Y2
+#define XTMP3 Y3
+#define XTMP4 Y8
+#define XTMP5 Y11
+
+#define XFER Y9
+
+#define BYTE_FLIP_MASK Y13 // mask to convert LE -> BE
+#define X_BYTE_FLIP_MASK X13
+
+#define NUM_BYTES DX
+#define INP DI
+
+#define CTX SI // Beginning of digest in memory (a, b, c, ... , h)
+
+#define a AX
+#define b BX
+#define c CX
+#define d R8
+#define e DX
+#define f R9
+#define g R10
+#define h R11
+
+#define old_h R11
+
+#define TBL BP
+
+#define SRND SI // SRND is same register as CTX
+
+#define T1 R12
+
+#define y0 R13
+#define y1 R14
+#define y2 R15
+#define y3 DI
+
+// Offsets
+#define XFER_SIZE 2*64*4
+#define INP_END_SIZE 8
+#define INP_SIZE 8
+
+#define _XFER 0
+#define _INP_END _XFER + XFER_SIZE
+#define _INP _INP_END + INP_END_SIZE
+#define STACK_SIZE _INP + INP_SIZE
+
+#define ROUND_AND_SCHED_N_0(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \
+ ; \ // ############################# RND N + 0 ############################//
+ MOVL a, y3; \ // y3 = a // MAJA
+ RORXL $25, e, y0; \ // y0 = e >> 25 // S1A
+ RORXL $11, e, y1; \ // y1 = e >> 11 // S1B
+ ; \
+ ADDL (disp + 0*4)(SP)(SRND*1), h; \ // h = k + w + h // disp = k + w
+ ORL c, y3; \ // y3 = a|c // MAJA
+ VPALIGNR $4, XDWORD2, XDWORD3, XTMP0; \ // XTMP0 = W[-7]
+ MOVL f, y2; \ // y2 = f // CH
+ RORXL $13, a, T1; \ // T1 = a >> 13 // S0B
+ ; \
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1
+ XORL g, y2; \ // y2 = f^g // CH
+ VPADDD XDWORD0, XTMP0, XTMP0; \ // XTMP0 = W[-7] + W[-16]
+ RORXL $6, e, y1; \ // y1 = (e >> 6) // S1
+ ; \
+ ANDL e, y2; \ // y2 = (f^g)&e // CH
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1
+ RORXL $22, a, y1; \ // y1 = a >> 22 // S0A
+ ADDL h, d; \ // d = k + w + h + d // --
+ ; \
+ ANDL b, y3; \ // y3 = (a|c)&b // MAJA
+ VPALIGNR $4, XDWORD0, XDWORD1, XTMP1; \ // XTMP1 = W[-15]
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0
+ RORXL $2, a, T1; \ // T1 = (a >> 2) // S0
+ ; \
+ XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH
+ VPSRLD $7, XTMP1, XTMP2; \
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0
+ MOVL a, T1; \ // T1 = a // MAJB
+ ANDL c, T1; \ // T1 = a&c // MAJB
+ ; \
+ ADDL y0, y2; \ // y2 = S1 + CH // --
+ VPSLLD $(32-7), XTMP1, XTMP3; \
+ ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ
+ ADDL y1, h; \ // h = k + w + h + S0 // --
+ ; \
+ ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // --
+ VPOR XTMP2, XTMP3, XTMP3; \ // XTMP3 = W[-15] ror 7
+ ; \
+ VPSRLD $18, XTMP1, XTMP2; \
+ ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// --
+ ADDL y3, h // h = t1 + S0 + MAJ // --
+
+#define ROUND_AND_SCHED_N_1(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \
+ ; \ // ################################### RND N + 1 ############################
+ ; \
+ MOVL a, y3; \ // y3 = a // MAJA
+ RORXL $25, e, y0; \ // y0 = e >> 25 // S1A
+ RORXL $11, e, y1; \ // y1 = e >> 11 // S1B
+ ADDL (disp + 1*4)(SP)(SRND*1), h; \ // h = k + w + h // --
+ ORL c, y3; \ // y3 = a|c // MAJA
+ ; \
+ VPSRLD $3, XTMP1, XTMP4; \ // XTMP4 = W[-15] >> 3
+ MOVL f, y2; \ // y2 = f // CH
+ RORXL $13, a, T1; \ // T1 = a >> 13 // S0B
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1
+ XORL g, y2; \ // y2 = f^g // CH
+ ; \
+ RORXL $6, e, y1; \ // y1 = (e >> 6) // S1
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1
+ RORXL $22, a, y1; \ // y1 = a >> 22 // S0A
+ ANDL e, y2; \ // y2 = (f^g)&e // CH
+ ADDL h, d; \ // d = k + w + h + d // --
+ ; \
+ VPSLLD $(32-18), XTMP1, XTMP1; \
+ ANDL b, y3; \ // y3 = (a|c)&b // MAJA
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0
+ ; \
+ VPXOR XTMP1, XTMP3, XTMP3; \
+ RORXL $2, a, T1; \ // T1 = (a >> 2) // S0
+ XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH
+ ; \
+ VPXOR XTMP2, XTMP3, XTMP3; \ // XTMP3 = W[-15] ror 7 ^ W[-15] ror 18
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0
+ MOVL a, T1; \ // T1 = a // MAJB
+ ANDL c, T1; \ // T1 = a&c // MAJB
+ ADDL y0, y2; \ // y2 = S1 + CH // --
+ ; \
+ VPXOR XTMP4, XTMP3, XTMP1; \ // XTMP1 = s0
+ VPSHUFD $0xFA, XDWORD3, XTMP2; \ // XTMP2 = W[-2] {BBAA}
+ ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ
+ ADDL y1, h; \ // h = k + w + h + S0 // --
+ ; \
+ VPADDD XTMP1, XTMP0, XTMP0; \ // XTMP0 = W[-16] + W[-7] + s0
+ ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // --
+ ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// --
+ ADDL y3, h; \ // h = t1 + S0 + MAJ // --
+ ; \
+ VPSRLD $10, XTMP2, XTMP4 // XTMP4 = W[-2] >> 10 {BBAA}
+
+#define ROUND_AND_SCHED_N_2(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \
+ ; \ // ################################### RND N + 2 ############################
+ ; \
+ MOVL a, y3; \ // y3 = a // MAJA
+ RORXL $25, e, y0; \ // y0 = e >> 25 // S1A
+ ADDL (disp + 2*4)(SP)(SRND*1), h; \ // h = k + w + h // --
+ ; \
+ VPSRLQ $19, XTMP2, XTMP3; \ // XTMP3 = W[-2] ror 19 {xBxA}
+ RORXL $11, e, y1; \ // y1 = e >> 11 // S1B
+ ORL c, y3; \ // y3 = a|c // MAJA
+ MOVL f, y2; \ // y2 = f // CH
+ XORL g, y2; \ // y2 = f^g // CH
+ ; \
+ RORXL $13, a, T1; \ // T1 = a >> 13 // S0B
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1
+ VPSRLQ $17, XTMP2, XTMP2; \ // XTMP2 = W[-2] ror 17 {xBxA}
+ ANDL e, y2; \ // y2 = (f^g)&e // CH
+ ; \
+ RORXL $6, e, y1; \ // y1 = (e >> 6) // S1
+ VPXOR XTMP3, XTMP2, XTMP2; \
+ ADDL h, d; \ // d = k + w + h + d // --
+ ANDL b, y3; \ // y3 = (a|c)&b // MAJA
+ ; \
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1
+ RORXL $22, a, y1; \ // y1 = a >> 22 // S0A
+ VPXOR XTMP2, XTMP4, XTMP4; \ // XTMP4 = s1 {xBxA}
+ XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH
+ ; \
+ VPSHUFB shuff_00BA<>(SB), XTMP4, XTMP4;\ // XTMP4 = s1 {00BA}
+ ; \
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0
+ RORXL $2, a, T1; \ // T1 = (a >> 2) // S0
+ VPADDD XTMP4, XTMP0, XTMP0; \ // XTMP0 = {..., ..., W[1], W[0]}
+ ; \
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0
+ MOVL a, T1; \ // T1 = a // MAJB
+ ANDL c, T1; \ // T1 = a&c // MAJB
+ ADDL y0, y2; \ // y2 = S1 + CH // --
+ VPSHUFD $80, XTMP0, XTMP2; \ // XTMP2 = W[-2] {DDCC}
+ ; \
+ ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ
+ ADDL y1, h; \ // h = k + w + h + S0 // --
+ ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // --
+ ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// --
+ ; \
+ ADDL y3, h // h = t1 + S0 + MAJ // --
+
+#define ROUND_AND_SCHED_N_3(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \
+ ; \ // ################################### RND N + 3 ############################
+ ; \
+ MOVL a, y3; \ // y3 = a // MAJA
+ RORXL $25, e, y0; \ // y0 = e >> 25 // S1A
+ RORXL $11, e, y1; \ // y1 = e >> 11 // S1B
+ ADDL (disp + 3*4)(SP)(SRND*1), h; \ // h = k + w + h // --
+ ORL c, y3; \ // y3 = a|c // MAJA
+ ; \
+ VPSRLD $10, XTMP2, XTMP5; \ // XTMP5 = W[-2] >> 10 {DDCC}
+ MOVL f, y2; \ // y2 = f // CH
+ RORXL $13, a, T1; \ // T1 = a >> 13 // S0B
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1
+ XORL g, y2; \ // y2 = f^g // CH
+ ; \
+ VPSRLQ $19, XTMP2, XTMP3; \ // XTMP3 = W[-2] ror 19 {xDxC}
+ RORXL $6, e, y1; \ // y1 = (e >> 6) // S1
+ ANDL e, y2; \ // y2 = (f^g)&e // CH
+ ADDL h, d; \ // d = k + w + h + d // --
+ ANDL b, y3; \ // y3 = (a|c)&b // MAJA
+ ; \
+ VPSRLQ $17, XTMP2, XTMP2; \ // XTMP2 = W[-2] ror 17 {xDxC}
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1
+ XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH
+ ; \
+ VPXOR XTMP3, XTMP2, XTMP2; \
+ RORXL $22, a, y1; \ // y1 = a >> 22 // S0A
+ ADDL y0, y2; \ // y2 = S1 + CH // --
+ ; \
+ VPXOR XTMP2, XTMP5, XTMP5; \ // XTMP5 = s1 {xDxC}
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0
+ ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // --
+ ; \
+ RORXL $2, a, T1; \ // T1 = (a >> 2) // S0
+ ; \
+ VPSHUFB shuff_DC00<>(SB), XTMP5, XTMP5;\ // XTMP5 = s1 {DC00}
+ ; \
+ VPADDD XTMP0, XTMP5, XDWORD0; \ // XDWORD0 = {W[3], W[2], W[1], W[0]}
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0
+ MOVL a, T1; \ // T1 = a // MAJB
+ ANDL c, T1; \ // T1 = a&c // MAJB
+ ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ
+ ; \
+ ADDL y1, h; \ // h = k + w + h + S0 // --
+ ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// --
+ ADDL y3, h // h = t1 + S0 + MAJ // --
+
+#define DO_ROUND_N_0(disp, a, b, c, d, e, f, g, h, old_h) \
+ ; \ // ################################### RND N + 0 ###########################
+ MOVL f, y2; \ // y2 = f // CH
+ RORXL $25, e, y0; \ // y0 = e >> 25 // S1A
+ RORXL $11, e, y1; \ // y1 = e >> 11 // S1B
+ XORL g, y2; \ // y2 = f^g // CH
+ ; \
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1
+ RORXL $6, e, y1; \ // y1 = (e >> 6) // S1
+ ANDL e, y2; \ // y2 = (f^g)&e // CH
+ ; \
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1
+ RORXL $13, a, T1; \ // T1 = a >> 13 // S0B
+ XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH
+ RORXL $22, a, y1; \ // y1 = a >> 22 // S0A
+ MOVL a, y3; \ // y3 = a // MAJA
+ ; \
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0
+ RORXL $2, a, T1; \ // T1 = (a >> 2) // S0
+ ADDL (disp + 0*4)(SP)(SRND*1), h; \ // h = k + w + h // --
+ ORL c, y3; \ // y3 = a|c // MAJA
+ ; \
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0
+ MOVL a, T1; \ // T1 = a // MAJB
+ ANDL b, y3; \ // y3 = (a|c)&b // MAJA
+ ANDL c, T1; \ // T1 = a&c // MAJB
+ ADDL y0, y2; \ // y2 = S1 + CH // --
+ ; \
+ ADDL h, d; \ // d = k + w + h + d // --
+ ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ
+ ADDL y1, h; \ // h = k + w + h + S0 // --
+ ADDL y2, d // d = k + w + h + d + S1 + CH = d + t1 // --
+
+#define DO_ROUND_N_1(disp, a, b, c, d, e, f, g, h, old_h) \
+ ; \ // ################################### RND N + 1 ###########################
+ ADDL y2, old_h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0 // --
+ MOVL f, y2; \ // y2 = f // CH
+ RORXL $25, e, y0; \ // y0 = e >> 25 // S1A
+ RORXL $11, e, y1; \ // y1 = e >> 11 // S1B
+ XORL g, y2; \ // y2 = f^g // CH
+ ; \
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1
+ RORXL $6, e, y1; \ // y1 = (e >> 6) // S1
+ ANDL e, y2; \ // y2 = (f^g)&e // CH
+ ADDL y3, old_h; \ // h = t1 + S0 + MAJ // --
+ ; \
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1
+ RORXL $13, a, T1; \ // T1 = a >> 13 // S0B
+ XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH
+ RORXL $22, a, y1; \ // y1 = a >> 22 // S0A
+ MOVL a, y3; \ // y3 = a // MAJA
+ ; \
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0
+ RORXL $2, a, T1; \ // T1 = (a >> 2) // S0
+ ADDL (disp + 1*4)(SP)(SRND*1), h; \ // h = k + w + h // --
+ ORL c, y3; \ // y3 = a|c // MAJA
+ ; \
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0
+ MOVL a, T1; \ // T1 = a // MAJB
+ ANDL b, y3; \ // y3 = (a|c)&b // MAJA
+ ANDL c, T1; \ // T1 = a&c // MAJB
+ ADDL y0, y2; \ // y2 = S1 + CH // --
+ ; \
+ ADDL h, d; \ // d = k + w + h + d // --
+ ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ
+ ADDL y1, h; \ // h = k + w + h + S0 // --
+ ; \
+ ADDL y2, d // d = k + w + h + d + S1 + CH = d + t1 // --
+
+#define DO_ROUND_N_2(disp, a, b, c, d, e, f, g, h, old_h) \
+ ; \ // ################################### RND N + 2 ##############################
+ ADDL y2, old_h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// --
+ MOVL f, y2; \ // y2 = f // CH
+ RORXL $25, e, y0; \ // y0 = e >> 25 // S1A
+ RORXL $11, e, y1; \ // y1 = e >> 11 // S1B
+ XORL g, y2; \ // y2 = f^g // CH
+ ; \
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1
+ RORXL $6, e, y1; \ // y1 = (e >> 6) // S1
+ ANDL e, y2; \ // y2 = (f^g)&e // CH
+ ADDL y3, old_h; \ // h = t1 + S0 + MAJ // --
+ ; \
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1
+ RORXL $13, a, T1; \ // T1 = a >> 13 // S0B
+ XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH
+ RORXL $22, a, y1; \ // y1 = a >> 22 // S0A
+ MOVL a, y3; \ // y3 = a // MAJA
+ ; \
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0
+ RORXL $2, a, T1; \ // T1 = (a >> 2) // S0
+ ADDL (disp + 2*4)(SP)(SRND*1), h; \ // h = k + w + h // --
+ ORL c, y3; \ // y3 = a|c // MAJA
+ ; \
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0
+ MOVL a, T1; \ // T1 = a // MAJB
+ ANDL b, y3; \ // y3 = (a|c)&b // MAJA
+ ANDL c, T1; \ // T1 = a&c // MAJB
+ ADDL y0, y2; \ // y2 = S1 + CH // --
+ ; \
+ ADDL h, d; \ // d = k + w + h + d // --
+ ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ
+ ADDL y1, h; \ // h = k + w + h + S0 // --
+ ; \
+ ADDL y2, d // d = k + w + h + d + S1 + CH = d + t1 // --
+
+#define DO_ROUND_N_3(disp, a, b, c, d, e, f, g, h, old_h) \
+ ; \ // ################################### RND N + 3 ###########################
+ ADDL y2, old_h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// --
+ MOVL f, y2; \ // y2 = f // CH
+ RORXL $25, e, y0; \ // y0 = e >> 25 // S1A
+ RORXL $11, e, y1; \ // y1 = e >> 11 // S1B
+ XORL g, y2; \ // y2 = f^g // CH
+ ; \
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1
+ RORXL $6, e, y1; \ // y1 = (e >> 6) // S1
+ ANDL e, y2; \ // y2 = (f^g)&e // CH
+ ADDL y3, old_h; \ // h = t1 + S0 + MAJ // --
+ ; \
+ XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1
+ RORXL $13, a, T1; \ // T1 = a >> 13 // S0B
+ XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH
+ RORXL $22, a, y1; \ // y1 = a >> 22 // S0A
+ MOVL a, y3; \ // y3 = a // MAJA
+ ; \
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0
+ RORXL $2, a, T1; \ // T1 = (a >> 2) // S0
+ ADDL (disp + 3*4)(SP)(SRND*1), h; \ // h = k + w + h // --
+ ORL c, y3; \ // y3 = a|c // MAJA
+ ; \
+ XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0
+ MOVL a, T1; \ // T1 = a // MAJB
+ ANDL b, y3; \ // y3 = (a|c)&b // MAJA
+ ANDL c, T1; \ // T1 = a&c // MAJB
+ ADDL y0, y2; \ // y2 = S1 + CH // --
+ ; \
+ ADDL h, d; \ // d = k + w + h + d // --
+ ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ
+ ADDL y1, h; \ // h = k + w + h + S0 // --
+ ; \
+ ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // --
+ ; \
+ ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// --
+ ; \
+ ADDL y3, h // h = t1 + S0 + MAJ // --
+
+// Definitions for sha-ni version
+//
+// The sha-ni implementation uses Intel(R) SHA extensions SHA256RNDS2, SHA256MSG1, SHA256MSG2
+// It also reuses portions of the flip_mask (half) and K256 table (stride 32) from the avx2 version
+//
+// Reference
+// S. Gulley, et al, "New Instructions Supporting the Secure Hash
+// Algorithm on Intel® Architecture Processors", July 2013
+// https://www.intel.com/content/www/us/en/developer/articles/technical/intel-sha-extensions.html
+//
+
+#define digestPtr DI // input/output, base pointer to digest hash vector H0, H1, ..., H7
+#define dataPtr SI // input, base pointer to first input data block
+#define numBytes DX // input, number of input bytes to be processed
+#define sha256Constants AX // round contents from K256 table, indexed by round number x 32
+#define msg X0 // input data
+#define state0 X1 // round intermediates and outputs
+#define state1 X2
+#define m0 X3 // m0, m1,... m4 -- round message temps
+#define m1 X4
+#define m2 X5
+#define m3 X6
+#define m4 X7
+#define shufMask X8 // input data endian conversion control mask
+#define abefSave X9 // digest hash vector inter-block buffer abef
+#define cdghSave X10 // digest hash vector inter-block buffer cdgh
+
+#define nop(m,a) // nop instead of final SHA256MSG1 for first and last few rounds
+
+#define sha256msg1(m,a) \ // final SHA256MSG1 for middle rounds that require it
+ SHA256MSG1 m, a
+
+#define vmov(a,b) \ // msg copy for all but rounds 12-15
+ VMOVDQA a, b
+
+#define vmovrev(a,b) \ // reverse copy for rounds 12-15
+ VMOVDQA b, a
+
+// sha rounds 0 to 11
+// identical with the exception of the final msg op
+// which is replaced with a nop for rounds where it is not needed
+// refer to Gulley, et al for more information
+#define rounds0to11(m,a,c,sha256Msg1) \
+ VMOVDQU c*16(dataPtr), msg \
+ PSHUFB shufMask, msg \
+ VMOVDQA msg, m \
+ PADDD (c*32)(sha256Constants), msg \
+ SHA256RNDS2 msg, state0, state1 \
+ PSHUFD $0x0e, msg, msg \
+ SHA256RNDS2 msg, state1, state0 \
+ sha256Msg1 (m,a)
+
+// sha rounds 12 to 59
+// identical with the exception of the final msg op
+// and the reverse copy(m,msg) in round 12 which is required
+// after the last data load
+// refer to Gulley, et al for more information
+#define rounds12to59(m,c,a,t,sha256Msg1,movop) \
+ movop (m,msg) \
+ PADDD (c*32)(sha256Constants), msg \
+ SHA256RNDS2 msg, state0, state1 \
+ VMOVDQA m, m4 \
+ PALIGNR $4, a, m4 \
+ PADDD m4, t \
+ SHA256MSG2 m, t \
+ PSHUFD $0x0e, msg, msg \
+ SHA256RNDS2 msg, state1, state0 \
+ sha256Msg1 (m,a)
+
+TEXT ·block(SB), 0, $536-32
+ CMPB ·useSHA(SB), $1
+ JE sha_ni
+ CMPB ·useAVX2(SB), $1
+ JE avx2
+
+ MOVQ p_base+8(FP), SI
+ MOVQ p_len+16(FP), DX
+ SHRQ $6, DX
+ SHLQ $6, DX
+
+ LEAQ (SI)(DX*1), DI
+ MOVQ DI, 256(SP)
+ CMPQ SI, DI
+ JEQ end
+
+ MOVQ dig+0(FP), BP
+ MOVL (0*4)(BP), R8 // a = H0
+ MOVL (1*4)(BP), R9 // b = H1
+ MOVL (2*4)(BP), R10 // c = H2
+ MOVL (3*4)(BP), R11 // d = H3
+ MOVL (4*4)(BP), R12 // e = H4
+ MOVL (5*4)(BP), R13 // f = H5
+ MOVL (6*4)(BP), R14 // g = H6
+ MOVL (7*4)(BP), R15 // h = H7
+
+loop:
+ MOVQ SP, BP
+
+ SHA256ROUND0(0, 0x428a2f98, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND0(1, 0x71374491, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND0(2, 0xb5c0fbcf, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND0(3, 0xe9b5dba5, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND0(4, 0x3956c25b, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND0(5, 0x59f111f1, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND0(6, 0x923f82a4, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND0(7, 0xab1c5ed5, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND0(8, 0xd807aa98, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND0(9, 0x12835b01, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND0(10, 0x243185be, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND0(11, 0x550c7dc3, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND0(12, 0x72be5d74, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND0(13, 0x80deb1fe, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND0(14, 0x9bdc06a7, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND0(15, 0xc19bf174, R9, R10, R11, R12, R13, R14, R15, R8)
+
+ SHA256ROUND1(16, 0xe49b69c1, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(17, 0xefbe4786, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(18, 0x0fc19dc6, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(19, 0x240ca1cc, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(20, 0x2de92c6f, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(21, 0x4a7484aa, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(22, 0x5cb0a9dc, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(23, 0x76f988da, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(24, 0x983e5152, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(25, 0xa831c66d, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(26, 0xb00327c8, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(27, 0xbf597fc7, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(28, 0xc6e00bf3, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(29, 0xd5a79147, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(30, 0x06ca6351, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(31, 0x14292967, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(32, 0x27b70a85, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(33, 0x2e1b2138, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(34, 0x4d2c6dfc, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(35, 0x53380d13, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(36, 0x650a7354, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(37, 0x766a0abb, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(38, 0x81c2c92e, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(39, 0x92722c85, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(40, 0xa2bfe8a1, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(41, 0xa81a664b, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(42, 0xc24b8b70, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(43, 0xc76c51a3, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(44, 0xd192e819, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(45, 0xd6990624, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(46, 0xf40e3585, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(47, 0x106aa070, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(48, 0x19a4c116, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(49, 0x1e376c08, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(50, 0x2748774c, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(51, 0x34b0bcb5, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(52, 0x391c0cb3, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(53, 0x4ed8aa4a, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(54, 0x5b9cca4f, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(55, 0x682e6ff3, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(56, 0x748f82ee, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(57, 0x78a5636f, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(58, 0x84c87814, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(59, 0x8cc70208, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(60, 0x90befffa, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(61, 0xa4506ceb, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(62, 0xbef9a3f7, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(63, 0xc67178f2, R9, R10, R11, R12, R13, R14, R15, R8)
+
+ MOVQ dig+0(FP), BP
+ ADDL (0*4)(BP), R8 // H0 = a + H0
+ MOVL R8, (0*4)(BP)
+ ADDL (1*4)(BP), R9 // H1 = b + H1
+ MOVL R9, (1*4)(BP)
+ ADDL (2*4)(BP), R10 // H2 = c + H2
+ MOVL R10, (2*4)(BP)
+ ADDL (3*4)(BP), R11 // H3 = d + H3
+ MOVL R11, (3*4)(BP)
+ ADDL (4*4)(BP), R12 // H4 = e + H4
+ MOVL R12, (4*4)(BP)
+ ADDL (5*4)(BP), R13 // H5 = f + H5
+ MOVL R13, (5*4)(BP)
+ ADDL (6*4)(BP), R14 // H6 = g + H6
+ MOVL R14, (6*4)(BP)
+ ADDL (7*4)(BP), R15 // H7 = h + H7
+ MOVL R15, (7*4)(BP)
+
+ ADDQ $64, SI
+ CMPQ SI, 256(SP)
+ JB loop
+
+end:
+ RET
+
+avx2:
+ MOVQ dig+0(FP), CTX // d.h[8]
+ MOVQ p_base+8(FP), INP
+ MOVQ p_len+16(FP), NUM_BYTES
+
+ LEAQ -64(INP)(NUM_BYTES*1), NUM_BYTES // Pointer to the last block
+ MOVQ NUM_BYTES, _INP_END(SP)
+
+ CMPQ NUM_BYTES, INP
+ JE avx2_only_one_block
+
+ // Load initial digest
+ MOVL 0(CTX), a // a = H0
+ MOVL 4(CTX), b // b = H1
+ MOVL 8(CTX), c // c = H2
+ MOVL 12(CTX), d // d = H3
+ MOVL 16(CTX), e // e = H4
+ MOVL 20(CTX), f // f = H5
+ MOVL 24(CTX), g // g = H6
+ MOVL 28(CTX), h // h = H7
+
+avx2_loop0: // at each iteration works with one block (512 bit)
+
+ VMOVDQU (0*32)(INP), XTMP0
+ VMOVDQU (1*32)(INP), XTMP1
+ VMOVDQU (2*32)(INP), XTMP2
+ VMOVDQU (3*32)(INP), XTMP3
+
+ VMOVDQU flip_mask<>(SB), BYTE_FLIP_MASK
+
+ // Apply Byte Flip Mask: LE -> BE
+ VPSHUFB BYTE_FLIP_MASK, XTMP0, XTMP0
+ VPSHUFB BYTE_FLIP_MASK, XTMP1, XTMP1
+ VPSHUFB BYTE_FLIP_MASK, XTMP2, XTMP2
+ VPSHUFB BYTE_FLIP_MASK, XTMP3, XTMP3
+
+ // Transpose data into high/low parts
+ VPERM2I128 $0x20, XTMP2, XTMP0, XDWORD0 // w3, w2, w1, w0
+ VPERM2I128 $0x31, XTMP2, XTMP0, XDWORD1 // w7, w6, w5, w4
+ VPERM2I128 $0x20, XTMP3, XTMP1, XDWORD2 // w11, w10, w9, w8
+ VPERM2I128 $0x31, XTMP3, XTMP1, XDWORD3 // w15, w14, w13, w12
+
+ MOVQ $K256<>(SB), TBL // Loading address of table with round-specific constants
+
+avx2_last_block_enter:
+ ADDQ $64, INP
+ MOVQ INP, _INP(SP)
+ XORQ SRND, SRND
+
+avx2_loop1: // for w0 - w47
+ // Do 4 rounds and scheduling
+ VPADDD 0*32(TBL)(SRND*1), XDWORD0, XFER
+ VMOVDQU XFER, (_XFER + 0*32)(SP)(SRND*1)
+ ROUND_AND_SCHED_N_0(_XFER + 0*32, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
+ ROUND_AND_SCHED_N_1(_XFER + 0*32, h, a, b, c, d, e, f, g, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
+ ROUND_AND_SCHED_N_2(_XFER + 0*32, g, h, a, b, c, d, e, f, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
+ ROUND_AND_SCHED_N_3(_XFER + 0*32, f, g, h, a, b, c, d, e, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
+
+ // Do 4 rounds and scheduling
+ VPADDD 1*32(TBL)(SRND*1), XDWORD1, XFER
+ VMOVDQU XFER, (_XFER + 1*32)(SP)(SRND*1)
+ ROUND_AND_SCHED_N_0(_XFER + 1*32, e, f, g, h, a, b, c, d, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
+ ROUND_AND_SCHED_N_1(_XFER + 1*32, d, e, f, g, h, a, b, c, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
+ ROUND_AND_SCHED_N_2(_XFER + 1*32, c, d, e, f, g, h, a, b, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
+ ROUND_AND_SCHED_N_3(_XFER + 1*32, b, c, d, e, f, g, h, a, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
+
+ // Do 4 rounds and scheduling
+ VPADDD 2*32(TBL)(SRND*1), XDWORD2, XFER
+ VMOVDQU XFER, (_XFER + 2*32)(SP)(SRND*1)
+ ROUND_AND_SCHED_N_0(_XFER + 2*32, a, b, c, d, e, f, g, h, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
+ ROUND_AND_SCHED_N_1(_XFER + 2*32, h, a, b, c, d, e, f, g, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
+ ROUND_AND_SCHED_N_2(_XFER + 2*32, g, h, a, b, c, d, e, f, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
+ ROUND_AND_SCHED_N_3(_XFER + 2*32, f, g, h, a, b, c, d, e, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
+
+ // Do 4 rounds and scheduling
+ VPADDD 3*32(TBL)(SRND*1), XDWORD3, XFER
+ VMOVDQU XFER, (_XFER + 3*32)(SP)(SRND*1)
+ ROUND_AND_SCHED_N_0(_XFER + 3*32, e, f, g, h, a, b, c, d, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
+ ROUND_AND_SCHED_N_1(_XFER + 3*32, d, e, f, g, h, a, b, c, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
+ ROUND_AND_SCHED_N_2(_XFER + 3*32, c, d, e, f, g, h, a, b, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
+ ROUND_AND_SCHED_N_3(_XFER + 3*32, b, c, d, e, f, g, h, a, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
+
+ ADDQ $4*32, SRND
+ CMPQ SRND, $3*4*32
+ JB avx2_loop1
+
+avx2_loop2:
+ // w48 - w63 processed with no scheduling (last 16 rounds)
+ VPADDD 0*32(TBL)(SRND*1), XDWORD0, XFER
+ VMOVDQU XFER, (_XFER + 0*32)(SP)(SRND*1)
+ DO_ROUND_N_0(_XFER + 0*32, a, b, c, d, e, f, g, h, h)
+ DO_ROUND_N_1(_XFER + 0*32, h, a, b, c, d, e, f, g, h)
+ DO_ROUND_N_2(_XFER + 0*32, g, h, a, b, c, d, e, f, g)
+ DO_ROUND_N_3(_XFER + 0*32, f, g, h, a, b, c, d, e, f)
+
+ VPADDD 1*32(TBL)(SRND*1), XDWORD1, XFER
+ VMOVDQU XFER, (_XFER + 1*32)(SP)(SRND*1)
+ DO_ROUND_N_0(_XFER + 1*32, e, f, g, h, a, b, c, d, e)
+ DO_ROUND_N_1(_XFER + 1*32, d, e, f, g, h, a, b, c, d)
+ DO_ROUND_N_2(_XFER + 1*32, c, d, e, f, g, h, a, b, c)
+ DO_ROUND_N_3(_XFER + 1*32, b, c, d, e, f, g, h, a, b)
+
+ ADDQ $2*32, SRND
+
+ VMOVDQU XDWORD2, XDWORD0
+ VMOVDQU XDWORD3, XDWORD1
+
+ CMPQ SRND, $4*4*32
+ JB avx2_loop2
+
+ MOVQ dig+0(FP), CTX // d.h[8]
+ MOVQ _INP(SP), INP
+
+ addm( 0(CTX), a)
+ addm( 4(CTX), b)
+ addm( 8(CTX), c)
+ addm( 12(CTX), d)
+ addm( 16(CTX), e)
+ addm( 20(CTX), f)
+ addm( 24(CTX), g)
+ addm( 28(CTX), h)
+
+ CMPQ _INP_END(SP), INP
+ JB done_hash
+
+ XORQ SRND, SRND
+
+avx2_loop3: // Do second block using previously scheduled results
+ DO_ROUND_N_0(_XFER + 0*32 + 16, a, b, c, d, e, f, g, h, a)
+ DO_ROUND_N_1(_XFER + 0*32 + 16, h, a, b, c, d, e, f, g, h)
+ DO_ROUND_N_2(_XFER + 0*32 + 16, g, h, a, b, c, d, e, f, g)
+ DO_ROUND_N_3(_XFER + 0*32 + 16, f, g, h, a, b, c, d, e, f)
+
+ DO_ROUND_N_0(_XFER + 1*32 + 16, e, f, g, h, a, b, c, d, e)
+ DO_ROUND_N_1(_XFER + 1*32 + 16, d, e, f, g, h, a, b, c, d)
+ DO_ROUND_N_2(_XFER + 1*32 + 16, c, d, e, f, g, h, a, b, c)
+ DO_ROUND_N_3(_XFER + 1*32 + 16, b, c, d, e, f, g, h, a, b)
+
+ ADDQ $2*32, SRND
+ CMPQ SRND, $4*4*32
+ JB avx2_loop3
+
+ MOVQ dig+0(FP), CTX // d.h[8]
+ MOVQ _INP(SP), INP
+ ADDQ $64, INP
+
+ addm( 0(CTX), a)
+ addm( 4(CTX), b)
+ addm( 8(CTX), c)
+ addm( 12(CTX), d)
+ addm( 16(CTX), e)
+ addm( 20(CTX), f)
+ addm( 24(CTX), g)
+ addm( 28(CTX), h)
+
+ CMPQ _INP_END(SP), INP
+ JA avx2_loop0
+ JB done_hash
+
+avx2_do_last_block:
+
+ VMOVDQU 0(INP), XWORD0
+ VMOVDQU 16(INP), XWORD1
+ VMOVDQU 32(INP), XWORD2
+ VMOVDQU 48(INP), XWORD3
+
+ VMOVDQU flip_mask<>(SB), BYTE_FLIP_MASK
+
+ VPSHUFB X_BYTE_FLIP_MASK, XWORD0, XWORD0
+ VPSHUFB X_BYTE_FLIP_MASK, XWORD1, XWORD1
+ VPSHUFB X_BYTE_FLIP_MASK, XWORD2, XWORD2
+ VPSHUFB X_BYTE_FLIP_MASK, XWORD3, XWORD3
+
+ MOVQ $K256<>(SB), TBL
+
+ JMP avx2_last_block_enter
+
+avx2_only_one_block:
+ // Load initial digest
+ MOVL 0(CTX), a // a = H0
+ MOVL 4(CTX), b // b = H1
+ MOVL 8(CTX), c // c = H2
+ MOVL 12(CTX), d // d = H3
+ MOVL 16(CTX), e // e = H4
+ MOVL 20(CTX), f // f = H5
+ MOVL 24(CTX), g // g = H6
+ MOVL 28(CTX), h // h = H7
+
+ JMP avx2_do_last_block
+
+done_hash:
+ VZEROUPPER
+ RET
+
+sha_ni:
+ MOVQ dig+0(FP), digestPtr // init digest hash vector H0, H1,..., H7 pointer
+ MOVQ p_base+8(FP), dataPtr // init input data base pointer
+ MOVQ p_len+16(FP), numBytes // get number of input bytes to hash
+ SHRQ $6, numBytes // force modulo 64 input buffer length
+ SHLQ $6, numBytes
+ CMPQ numBytes, $0 // exit early for zero-length input buffer
+ JEQ done
+ ADDQ dataPtr, numBytes // point numBytes to end of input buffer
+ VMOVDQU (0*16)(digestPtr), state0 // load initial hash values and reorder
+ VMOVDQU (1*16)(digestPtr), state1 // DCBA, HGFE -> ABEF, CDGH
+ PSHUFD $0xb1, state0, state0 // CDAB
+ PSHUFD $0x1b, state1, state1 // EFGH
+ VMOVDQA state0, m4
+ PALIGNR $8, state1, state0 // ABEF
+ PBLENDW $0xf0, m4, state1 // CDGH
+ VMOVDQA flip_mask<>(SB), shufMask
+ LEAQ K256<>(SB), sha256Constants
+
+roundLoop:
+ // save hash values for addition after rounds
+ VMOVDQA state0, abefSave
+ VMOVDQA state1, cdghSave
+
+ // do rounds 0-59
+ rounds0to11 (m0,-,0,nop) // 0-3
+ rounds0to11 (m1,m0,1,sha256msg1) // 4-7
+ rounds0to11 (m2,m1,2,sha256msg1) // 8-11
+ VMOVDQU (3*16)(dataPtr), msg
+ PSHUFB shufMask, msg
+ rounds12to59 (m3,3,m2,m0,sha256msg1,vmovrev) // 12-15
+ rounds12to59 (m0,4,m3,m1,sha256msg1,vmov) // 16-19
+ rounds12to59 (m1,5,m0,m2,sha256msg1,vmov) // 20-23
+ rounds12to59 (m2,6,m1,m3,sha256msg1,vmov) // 24-27
+ rounds12to59 (m3,7,m2,m0,sha256msg1,vmov) // 28-31
+ rounds12to59 (m0,8,m3,m1,sha256msg1,vmov) // 32-35
+ rounds12to59 (m1,9,m0,m2,sha256msg1,vmov) // 36-39
+ rounds12to59 (m2,10,m1,m3,sha256msg1,vmov) // 40-43
+ rounds12to59 (m3,11,m2,m0,sha256msg1,vmov) // 44-47
+ rounds12to59 (m0,12,m3,m1,sha256msg1,vmov) // 48-51
+ rounds12to59 (m1,13,m0,m2,nop,vmov) // 52-55
+ rounds12to59 (m2,14,m1,m3,nop,vmov) // 56-59
+
+ // do rounds 60-63
+ VMOVDQA m3, msg
+ PADDD (15*32)(sha256Constants), msg
+ SHA256RNDS2 msg, state0, state1
+ PSHUFD $0x0e, msg, msg
+ SHA256RNDS2 msg, state1, state0
+
+ // add current hash values with previously saved
+ PADDD abefSave, state0
+ PADDD cdghSave, state1
+
+ // advance data pointer; loop until buffer empty
+ ADDQ $64, dataPtr
+ CMPQ numBytes, dataPtr
+ JNE roundLoop
+
+ // write hash values back in the correct order
+ PSHUFD $0x1b, state0, state0 // FEBA
+ PSHUFD $0xb1, state1, state1 // DCHG
+ VMOVDQA state0, m4
+ PBLENDW $0xf0, state1, state0 // DCBA
+ PALIGNR $8, m4, state1 // HGFE
+ VMOVDQU state0, (0*16)(digestPtr)
+ VMOVDQU state1, (1*16)(digestPtr)
+
+done:
+ RET
+
+// shuffle byte order from LE to BE
+DATA flip_mask<>+0x00(SB)/8, $0x0405060700010203
+DATA flip_mask<>+0x08(SB)/8, $0x0c0d0e0f08090a0b
+DATA flip_mask<>+0x10(SB)/8, $0x0405060700010203
+DATA flip_mask<>+0x18(SB)/8, $0x0c0d0e0f08090a0b
+GLOBL flip_mask<>(SB), 8, $32
+
+// shuffle xBxA -> 00BA
+DATA shuff_00BA<>+0x00(SB)/8, $0x0b0a090803020100
+DATA shuff_00BA<>+0x08(SB)/8, $0xFFFFFFFFFFFFFFFF
+DATA shuff_00BA<>+0x10(SB)/8, $0x0b0a090803020100
+DATA shuff_00BA<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF
+GLOBL shuff_00BA<>(SB), 8, $32
+
+// shuffle xDxC -> DC00
+DATA shuff_DC00<>+0x00(SB)/8, $0xFFFFFFFFFFFFFFFF
+DATA shuff_DC00<>+0x08(SB)/8, $0x0b0a090803020100
+DATA shuff_DC00<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF
+DATA shuff_DC00<>+0x18(SB)/8, $0x0b0a090803020100
+GLOBL shuff_DC00<>(SB), 8, $32
+
+// Round specific constants
+DATA K256<>+0x00(SB)/4, $0x428a2f98 // k1
+DATA K256<>+0x04(SB)/4, $0x71374491 // k2
+DATA K256<>+0x08(SB)/4, $0xb5c0fbcf // k3
+DATA K256<>+0x0c(SB)/4, $0xe9b5dba5 // k4
+DATA K256<>+0x10(SB)/4, $0x428a2f98 // k1
+DATA K256<>+0x14(SB)/4, $0x71374491 // k2
+DATA K256<>+0x18(SB)/4, $0xb5c0fbcf // k3
+DATA K256<>+0x1c(SB)/4, $0xe9b5dba5 // k4
+
+DATA K256<>+0x20(SB)/4, $0x3956c25b // k5 - k8
+DATA K256<>+0x24(SB)/4, $0x59f111f1
+DATA K256<>+0x28(SB)/4, $0x923f82a4
+DATA K256<>+0x2c(SB)/4, $0xab1c5ed5
+DATA K256<>+0x30(SB)/4, $0x3956c25b
+DATA K256<>+0x34(SB)/4, $0x59f111f1
+DATA K256<>+0x38(SB)/4, $0x923f82a4
+DATA K256<>+0x3c(SB)/4, $0xab1c5ed5
+
+DATA K256<>+0x40(SB)/4, $0xd807aa98 // k9 - k12
+DATA K256<>+0x44(SB)/4, $0x12835b01
+DATA K256<>+0x48(SB)/4, $0x243185be
+DATA K256<>+0x4c(SB)/4, $0x550c7dc3
+DATA K256<>+0x50(SB)/4, $0xd807aa98
+DATA K256<>+0x54(SB)/4, $0x12835b01
+DATA K256<>+0x58(SB)/4, $0x243185be
+DATA K256<>+0x5c(SB)/4, $0x550c7dc3
+
+DATA K256<>+0x60(SB)/4, $0x72be5d74 // k13 - k16
+DATA K256<>+0x64(SB)/4, $0x80deb1fe
+DATA K256<>+0x68(SB)/4, $0x9bdc06a7
+DATA K256<>+0x6c(SB)/4, $0xc19bf174
+DATA K256<>+0x70(SB)/4, $0x72be5d74
+DATA K256<>+0x74(SB)/4, $0x80deb1fe
+DATA K256<>+0x78(SB)/4, $0x9bdc06a7
+DATA K256<>+0x7c(SB)/4, $0xc19bf174
+
+DATA K256<>+0x80(SB)/4, $0xe49b69c1 // k17 - k20
+DATA K256<>+0x84(SB)/4, $0xefbe4786
+DATA K256<>+0x88(SB)/4, $0x0fc19dc6
+DATA K256<>+0x8c(SB)/4, $0x240ca1cc
+DATA K256<>+0x90(SB)/4, $0xe49b69c1
+DATA K256<>+0x94(SB)/4, $0xefbe4786
+DATA K256<>+0x98(SB)/4, $0x0fc19dc6
+DATA K256<>+0x9c(SB)/4, $0x240ca1cc
+
+DATA K256<>+0xa0(SB)/4, $0x2de92c6f // k21 - k24
+DATA K256<>+0xa4(SB)/4, $0x4a7484aa
+DATA K256<>+0xa8(SB)/4, $0x5cb0a9dc
+DATA K256<>+0xac(SB)/4, $0x76f988da
+DATA K256<>+0xb0(SB)/4, $0x2de92c6f
+DATA K256<>+0xb4(SB)/4, $0x4a7484aa
+DATA K256<>+0xb8(SB)/4, $0x5cb0a9dc
+DATA K256<>+0xbc(SB)/4, $0x76f988da
+
+DATA K256<>+0xc0(SB)/4, $0x983e5152 // k25 - k28
+DATA K256<>+0xc4(SB)/4, $0xa831c66d
+DATA K256<>+0xc8(SB)/4, $0xb00327c8
+DATA K256<>+0xcc(SB)/4, $0xbf597fc7
+DATA K256<>+0xd0(SB)/4, $0x983e5152
+DATA K256<>+0xd4(SB)/4, $0xa831c66d
+DATA K256<>+0xd8(SB)/4, $0xb00327c8
+DATA K256<>+0xdc(SB)/4, $0xbf597fc7
+
+DATA K256<>+0xe0(SB)/4, $0xc6e00bf3 // k29 - k32
+DATA K256<>+0xe4(SB)/4, $0xd5a79147
+DATA K256<>+0xe8(SB)/4, $0x06ca6351
+DATA K256<>+0xec(SB)/4, $0x14292967
+DATA K256<>+0xf0(SB)/4, $0xc6e00bf3
+DATA K256<>+0xf4(SB)/4, $0xd5a79147
+DATA K256<>+0xf8(SB)/4, $0x06ca6351
+DATA K256<>+0xfc(SB)/4, $0x14292967
+
+DATA K256<>+0x100(SB)/4, $0x27b70a85
+DATA K256<>+0x104(SB)/4, $0x2e1b2138
+DATA K256<>+0x108(SB)/4, $0x4d2c6dfc
+DATA K256<>+0x10c(SB)/4, $0x53380d13
+DATA K256<>+0x110(SB)/4, $0x27b70a85
+DATA K256<>+0x114(SB)/4, $0x2e1b2138
+DATA K256<>+0x118(SB)/4, $0x4d2c6dfc
+DATA K256<>+0x11c(SB)/4, $0x53380d13
+
+DATA K256<>+0x120(SB)/4, $0x650a7354
+DATA K256<>+0x124(SB)/4, $0x766a0abb
+DATA K256<>+0x128(SB)/4, $0x81c2c92e
+DATA K256<>+0x12c(SB)/4, $0x92722c85
+DATA K256<>+0x130(SB)/4, $0x650a7354
+DATA K256<>+0x134(SB)/4, $0x766a0abb
+DATA K256<>+0x138(SB)/4, $0x81c2c92e
+DATA K256<>+0x13c(SB)/4, $0x92722c85
+
+DATA K256<>+0x140(SB)/4, $0xa2bfe8a1
+DATA K256<>+0x144(SB)/4, $0xa81a664b
+DATA K256<>+0x148(SB)/4, $0xc24b8b70
+DATA K256<>+0x14c(SB)/4, $0xc76c51a3
+DATA K256<>+0x150(SB)/4, $0xa2bfe8a1
+DATA K256<>+0x154(SB)/4, $0xa81a664b
+DATA K256<>+0x158(SB)/4, $0xc24b8b70
+DATA K256<>+0x15c(SB)/4, $0xc76c51a3
+
+DATA K256<>+0x160(SB)/4, $0xd192e819
+DATA K256<>+0x164(SB)/4, $0xd6990624
+DATA K256<>+0x168(SB)/4, $0xf40e3585
+DATA K256<>+0x16c(SB)/4, $0x106aa070
+DATA K256<>+0x170(SB)/4, $0xd192e819
+DATA K256<>+0x174(SB)/4, $0xd6990624
+DATA K256<>+0x178(SB)/4, $0xf40e3585
+DATA K256<>+0x17c(SB)/4, $0x106aa070
+
+DATA K256<>+0x180(SB)/4, $0x19a4c116
+DATA K256<>+0x184(SB)/4, $0x1e376c08
+DATA K256<>+0x188(SB)/4, $0x2748774c
+DATA K256<>+0x18c(SB)/4, $0x34b0bcb5
+DATA K256<>+0x190(SB)/4, $0x19a4c116
+DATA K256<>+0x194(SB)/4, $0x1e376c08
+DATA K256<>+0x198(SB)/4, $0x2748774c
+DATA K256<>+0x19c(SB)/4, $0x34b0bcb5
+
+DATA K256<>+0x1a0(SB)/4, $0x391c0cb3
+DATA K256<>+0x1a4(SB)/4, $0x4ed8aa4a
+DATA K256<>+0x1a8(SB)/4, $0x5b9cca4f
+DATA K256<>+0x1ac(SB)/4, $0x682e6ff3
+DATA K256<>+0x1b0(SB)/4, $0x391c0cb3
+DATA K256<>+0x1b4(SB)/4, $0x4ed8aa4a
+DATA K256<>+0x1b8(SB)/4, $0x5b9cca4f
+DATA K256<>+0x1bc(SB)/4, $0x682e6ff3
+
+DATA K256<>+0x1c0(SB)/4, $0x748f82ee
+DATA K256<>+0x1c4(SB)/4, $0x78a5636f
+DATA K256<>+0x1c8(SB)/4, $0x84c87814
+DATA K256<>+0x1cc(SB)/4, $0x8cc70208
+DATA K256<>+0x1d0(SB)/4, $0x748f82ee
+DATA K256<>+0x1d4(SB)/4, $0x78a5636f
+DATA K256<>+0x1d8(SB)/4, $0x84c87814
+DATA K256<>+0x1dc(SB)/4, $0x8cc70208
+
+DATA K256<>+0x1e0(SB)/4, $0x90befffa
+DATA K256<>+0x1e4(SB)/4, $0xa4506ceb
+DATA K256<>+0x1e8(SB)/4, $0xbef9a3f7
+DATA K256<>+0x1ec(SB)/4, $0xc67178f2
+DATA K256<>+0x1f0(SB)/4, $0x90befffa
+DATA K256<>+0x1f4(SB)/4, $0xa4506ceb
+DATA K256<>+0x1f8(SB)/4, $0xbef9a3f7
+DATA K256<>+0x1fc(SB)/4, $0xc67178f2
+
+GLOBL K256<>(SB), (NOPTR + RODATA), $512
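
Note the K256 layout: each group of four round constants is stored twice in a row, so a single 256-bit VPADDD feeds identical constants to both 128-bit lanes of the two-block AVX2 path, while the sha-ni path reads the table with a 32-byte stride and skips the duplicates. A sketch that builds such a doubled table from a canonical 64-entry list (illustrative, not part of the diff):

package main

import "fmt"

// doubleK lays out 64 round constants the way the K256 table above does:
// every 4-constant group is emitted twice, giving 128 words (512 bytes).
func doubleK(k []uint32) []uint32 {
	out := make([]uint32, 0, 2*len(k))
	for i := 0; i < len(k); i += 4 {
		out = append(out, k[i:i+4]...)
		out = append(out, k[i:i+4]...)
	}
	return out
}

func main() {
	k := make([]uint32, 64)
	for i := range k {
		k[i] = uint32(i) // stand-in values; the real table uses the FIPS constants
	}
	d := doubleK(k)
	fmt.Println(len(d), d[0] == d[4], d[8] == d[12]) // 128 true true
}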
diff --git a/contrib/go/_std_1.21/src/crypto/sha256/sha256block_arm64.go b/contrib/go/_std_1.21/src/crypto/sha256/sha256block_arm64.go
new file mode 100644
index 0000000000..e5da566363
--- /dev/null
+++ b/contrib/go/_std_1.21/src/crypto/sha256/sha256block_arm64.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha256
+
+import "internal/cpu"
+
+var k = _K
+
+//go:noescape
+func sha256block(h []uint32, p []byte, k []uint32)
+
+func block(dig *digest, p []byte) {
+ if !cpu.ARM64.HasSHA2 {
+ blockGeneric(dig, p)
+ } else {
+ h := dig.h[:]
+ sha256block(h, p, k)
+ }
+}
diff --git a/contrib/go/_std_1.21/src/crypto/sha256/sha256block_arm64.s b/contrib/go/_std_1.21/src/crypto/sha256/sha256block_arm64.s
new file mode 100644
index 0000000000..d5c1eb0b2e
--- /dev/null
+++ b/contrib/go/_std_1.21/src/crypto/sha256/sha256block_arm64.s
@@ -0,0 +1,119 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+#define HASHUPDATE \
+ SHA256H V9.S4, V3, V2 \
+ SHA256H2 V9.S4, V8, V3 \
+ VMOV V2.B16, V8.B16
+
+// func sha256block(h []uint32, p []byte, k []uint32)
+TEXT ·sha256block(SB),NOSPLIT,$0
+ MOVD h_base+0(FP), R0 // Hash value first address
+ MOVD p_base+24(FP), R1 // message first address
+ MOVD k_base+48(FP), R2 // k constants first address
+ MOVD p_len+32(FP), R3 // message length
+ VLD1 (R0), [V0.S4, V1.S4] // load h(a,b,c,d,e,f,g,h)
+ VLD1.P 64(R2), [V16.S4, V17.S4, V18.S4, V19.S4]
+ VLD1.P 64(R2), [V20.S4, V21.S4, V22.S4, V23.S4]
+ VLD1.P 64(R2), [V24.S4, V25.S4, V26.S4, V27.S4]
+ VLD1 (R2), [V28.S4, V29.S4, V30.S4, V31.S4] // load the 64*4-byte K constants (K0-K63)
+
+blockloop:
+
+ VLD1.P 16(R1), [V4.B16] // load 16bytes message
+ VLD1.P 16(R1), [V5.B16] // load 16bytes message
+ VLD1.P 16(R1), [V6.B16] // load 16bytes message
+ VLD1.P 16(R1), [V7.B16] // load 16bytes message
+ VMOV V0.B16, V2.B16 // backup: V0 h(dcba)
+ VMOV V1.B16, V3.B16 // backup: V1 h(hgfe)
+ VMOV V2.B16, V8.B16
+ VREV32 V4.B16, V4.B16 // prepare for using message in Byte format
+ VREV32 V5.B16, V5.B16
+ VREV32 V6.B16, V6.B16
+ VREV32 V7.B16, V7.B16
+
+ VADD V16.S4, V4.S4, V9.S4 // V18(W0+K0...W3+K3)
+ SHA256SU0 V5.S4, V4.S4 // V4: (su0(W1)+W0,...,su0(W4)+W3)
+ HASHUPDATE // H4
+
+ VADD V17.S4, V5.S4, V9.S4 // V18(W4+K4...W7+K7)
+ SHA256SU0 V6.S4, V5.S4 // V5: (su0(W5)+W4,...,su0(W8)+W7)
+ SHA256SU1 V7.S4, V6.S4, V4.S4 // V4: W16-W19
+ HASHUPDATE // H8
+
+ VADD V18.S4, V6.S4, V9.S4 // V18(W8+K8...W11+K11)
+ SHA256SU0 V7.S4, V6.S4 // V6: (su0(W9)+W8,...,su0(W12)+W11)
+ SHA256SU1 V4.S4, V7.S4, V5.S4 // V5: W20-W23
+ HASHUPDATE // H12
+
+ VADD V19.S4, V7.S4, V9.S4 // V18(W12+K12...W15+K15)
+ SHA256SU0 V4.S4, V7.S4 // V7: (su0(W13)+W12,...,su0(W16)+W15)
+ SHA256SU1 V5.S4, V4.S4, V6.S4 // V6: W24-W27
+ HASHUPDATE // H16
+
+ VADD V20.S4, V4.S4, V9.S4 // V18(W16+K16...W19+K19)
+ SHA256SU0 V5.S4, V4.S4 // V4: (su0(W17)+W16,...,su0(W20)+W19)
+ SHA256SU1 V6.S4, V5.S4, V7.S4 // V7: W28-W31
+ HASHUPDATE // H20
+
+ VADD V21.S4, V5.S4, V9.S4 // V18(W20+K20...W23+K23)
+ SHA256SU0 V6.S4, V5.S4 // V5: (su0(W21)+W20,...,su0(W24)+W23)
+ SHA256SU1 V7.S4, V6.S4, V4.S4 // V4: W32-W35
+ HASHUPDATE // H24
+
+ VADD V22.S4, V6.S4, V9.S4 // V18(W24+K24...W27+K27)
+ SHA256SU0 V7.S4, V6.S4 // V6: (su0(W25)+W24,...,su0(W28)+W27)
+ SHA256SU1 V4.S4, V7.S4, V5.S4 // V5: W36-W39
+ HASHUPDATE // H28
+
+ VADD V23.S4, V7.S4, V9.S4 // V18(W28+K28...W31+K31)
+ SHA256SU0 V4.S4, V7.S4 // V7: (su0(W29)+W28,...,su0(W32)+W31)
+ SHA256SU1 V5.S4, V4.S4, V6.S4 // V6: W40-W43
+ HASHUPDATE // H32
+
+ VADD V24.S4, V4.S4, V9.S4 // V18(W32+K32...W35+K35)
+ SHA256SU0 V5.S4, V4.S4 // V4: (su0(W33)+W32,...,su0(W36)+W35)
+ SHA256SU1 V6.S4, V5.S4, V7.S4 // V7: W44-W47
+ HASHUPDATE // H36
+
+ VADD V25.S4, V5.S4, V9.S4 // V18(W36+K36...W39+K39)
+ SHA256SU0 V6.S4, V5.S4 // V5: (su0(W37)+W36,...,su0(W40)+W39)
+ SHA256SU1 V7.S4, V6.S4, V4.S4 // V4: W48-W51
+ HASHUPDATE // H40
+
+ VADD V26.S4, V6.S4, V9.S4 // V18(W40+K40...W43+K43)
+ SHA256SU0 V7.S4, V6.S4 // V6: (su0(W41)+W40,...,su0(W44)+W43)
+ SHA256SU1 V4.S4, V7.S4, V5.S4 // V5: W52-W55
+ HASHUPDATE // H44
+
+ VADD V27.S4, V7.S4, V9.S4 // V18(W44+K44...W47+K47)
+ SHA256SU0 V4.S4, V7.S4 // V7: (su0(W45)+W44,...,su0(W48)+W47)
+ SHA256SU1 V5.S4, V4.S4, V6.S4 // V6: W56-W59
+ HASHUPDATE // H48
+
+ VADD V28.S4, V4.S4, V9.S4 // V18(W48+K48,...,W51+K51)
+ HASHUPDATE // H52
+ SHA256SU1 V6.S4, V5.S4, V7.S4 // V7: W60-W63
+
+ VADD V29.S4, V5.S4, V9.S4 // V18(W52+K52,...,W55+K55)
+ HASHUPDATE // H56
+
+ VADD V30.S4, V6.S4, V9.S4 // V18(W56+K56,...,W59+K59)
+ HASHUPDATE // H60
+
+ VADD V31.S4, V7.S4, V9.S4 // V18(W60+K60,...,W63+K63)
+ HASHUPDATE // H64
+
+ SUB $64, R3, R3 // message length - 64bytes, then compare with 64bytes
+ VADD V2.S4, V0.S4, V0.S4
+ VADD V3.S4, V1.S4, V1.S4
+ CBNZ R3, blockloop
+
+sha256ret:
+
+ VST1 [V0.S4, V1.S4], (R0) // store hash value H
+ RET
+
diff --git a/contrib/go/_std_1.21/src/crypto/sha256/sha256block_decl.go b/contrib/go/_std_1.21/src/crypto/sha256/sha256block_decl.go
new file mode 100644
index 0000000000..7d68cd95fe
--- /dev/null
+++ b/contrib/go/_std_1.21/src/crypto/sha256/sha256block_decl.go
@@ -0,0 +1,10 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64 || s390x || ppc64le || ppc64
+
+package sha256
+
+//go:noescape
+func block(dig *digest, p []byte)
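
This file only declares block for architectures whose body lives in assembly; the pure-Go fallback is not part of this diff, but upstream pairs the declaration with a file along these lines (reconstructed, so treat the build tag and wiring as assumptions to verify against upstream sha256block_generic.go):

//go:build !386 && !amd64 && !s390x && !ppc64le && !ppc64 && !arm64

package sha256

// Fallback for architectures without an assembly implementation:
// route block through the portable blockGeneric defined in sha256block.go.
func block(dig *digest, p []byte) {
	blockGeneric(dig, p)
}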
diff --git a/contrib/go/_std_1.21/src/crypto/sha256/ya.make b/contrib/go/_std_1.21/src/crypto/sha256/ya.make
new file mode 100644
index 0000000000..c4eb086700
--- /dev/null
+++ b/contrib/go/_std_1.21/src/crypto/sha256/ya.make
@@ -0,0 +1,30 @@
+GO_LIBRARY()
+
+SRCS(
+ sha256.go
+ sha256block.go
+)
+
+GO_TEST_SRCS(sha256_test.go)
+
+GO_XTEST_SRCS(example_test.go)
+
+IF (ARCH_X86_64)
+ SRCS(
+ sha256block_amd64.go
+ sha256block_amd64.s
+ sha256block_decl.go
+ )
+ENDIF()
+
+IF (ARCH_ARM64)
+ SRCS(
+ sha256block_arm64.go
+ sha256block_arm64.s
+ )
+ENDIF()
+
+END()
+
+RECURSE(
+)